diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh
index d026aa123..08b8084a0 100644
--- a/.github/scripts/python.sh
+++ b/.github/scripts/python.sh
@@ -9,14 +9,13 @@ set -x -e
# install TBB with _debug.so files
function install_tbb()
{
+ echo install_tbb
if [ "$(uname)" == "Linux" ]; then
sudo apt-get -y install libtbb-dev
elif [ "$(uname)" == "Darwin" ]; then
brew install tbb
-
fi
-
}
if [ -z ${PYTHON_VERSION+x} ]; then
@@ -37,19 +36,18 @@ function install_dependencies()
export PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
- [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
-
- $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
+ if [ "${GTSAM_WITH_TBB:-OFF}" == "ON" ]; then
+ install_tbb
+ fi
}
function build()
{
- mkdir $GITHUB_WORKSPACE/build
- cd $GITHUB_WORKSPACE/build
-
+ export CMAKE_GENERATOR=Ninja
BUILD_PYBIND="ON"
-
- cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
+ cmake $GITHUB_WORKSPACE \
+ -B build \
+ -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_USE_QUATERNIONS=OFF \
@@ -65,16 +63,16 @@ function build()
# Set to 2 cores so that Actions does not error out during resource provisioning.
- make -j2 install
+ cmake --build build -j2
- cd $GITHUB_WORKSPACE/build/python
- $PYTHON -m pip install --user .
+ $PYTHON -m pip install --user build/python
}
function test()
{
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover -v
+ cd $GITHUB_WORKSPACE
}
# select between build or test
diff --git a/.github/scripts/unix.sh b/.github/scripts/unix.sh
index 557255474..09fcd788b 100644
--- a/.github/scripts/unix.sh
+++ b/.github/scripts/unix.sh
@@ -5,33 +5,30 @@
# Specifically Linux and macOS.
##########################################################
+set -e # Make sure any error makes the script to return an error code
+set -x # echo
+
# install TBB with _debug.so files
function install_tbb()
{
+ echo install_tbb
if [ "$(uname)" == "Linux" ]; then
sudo apt-get -y install libtbb-dev
elif [ "$(uname)" == "Darwin" ]; then
brew install tbb
-
fi
-
}
# common tasks before either build or test
function configure()
{
- set -e # Make sure any error makes the script to return an error code
- set -x # echo
+ # delete old build
+ rm -rf build
- SOURCE_DIR=$GITHUB_WORKSPACE
- BUILD_DIR=$GITHUB_WORKSPACE/build
-
- #env
- rm -fr $BUILD_DIR || true
- mkdir $BUILD_DIR && cd $BUILD_DIR
-
- [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
+ if [ "${GTSAM_WITH_TBB:-OFF}" == "ON" ]; then
+ install_tbb
+ fi
if [ ! -z "$GCC_VERSION" ]; then
export CC=gcc-$GCC_VERSION
@@ -40,7 +37,9 @@ function configure()
# GTSAM_BUILD_WITH_MARCH_NATIVE=OFF: to avoid crashes in builder VMs
# CMAKE_CXX_FLAGS="-w": Suppress warnings to avoid IO latency in CI logs
- cmake $SOURCE_DIR \
+ export CMAKE_GENERATOR=Ninja
+ cmake $GITHUB_WORKSPACE \
+ -B build \
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Debug} \
-DCMAKE_CXX_FLAGS="-w" \
-DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
@@ -62,9 +61,9 @@ function configure()
function finish ()
{
# Print ccache stats
- [ -x "$(command -v ccache)" ] && ccache -s
-
- cd $SOURCE_DIR
+ if [ -x "$(command -v ccache)" ]; then
+ ccache -s
+ fi
}
# compile the code with the intent of populating the cache
@@ -77,12 +76,12 @@ function build ()
if [ "$(uname)" == "Linux" ]; then
if (($(nproc) > 2)); then
- make -j4
+ cmake --build build -j4
else
- make -j2
+ cmake --build build -j2
fi
elif [ "$(uname)" == "Darwin" ]; then
- make -j$(sysctl -n hw.physicalcpu)
+ cmake --build build -j$(sysctl -n hw.physicalcpu)
fi
finish
@@ -99,12 +98,12 @@ function test ()
# Actual testing
if [ "$(uname)" == "Linux" ]; then
if (($(nproc) > 2)); then
- make -j$(nproc) check
+ cmake --build build -j$(nproc) --target check
else
- make -j2 check
+ cmake --build build -j2 --target check
fi
elif [ "$(uname)" == "Darwin" ]; then
- make -j$(sysctl -n hw.physicalcpu) check
+ cmake --build build -j$(sysctl -n hw.physicalcpu) --target check
fi
finish
@@ -118,4 +117,4 @@ case $1 in
-t)
test
;;
-esac
\ No newline at end of file
+esac
diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml
index e4937ce06..5de09c63f 100644
--- a/.github/workflows/build-linux.yml
+++ b/.github/workflows/build-linux.yml
@@ -73,7 +73,7 @@ jobs:
fi
sudo apt-get -y update
- sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
+ sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev ninja-build
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml
index ca4645a77..91bc4e80a 100644
--- a/.github/workflows/build-python.yml
+++ b/.github/workflows/build-python.yml
@@ -75,7 +75,7 @@ jobs:
fi
sudo apt-get -y update
- sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev
+ sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev ninja-build
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
@@ -109,10 +109,13 @@ jobs:
with:
swap-size-gb: 6
- - name: Install Dependencies
+ - name: Install System Dependencies
run: |
bash .github/scripts/python.sh -d
+ - name: Install Python Dependencies
+ run: python$PYTHON_VERSION -m pip install -r python/dev_requirements.txt
+
- name: Build
run: |
bash .github/scripts/python.sh -b
diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml
index 72466ffd6..164646e3e 100644
--- a/.github/workflows/build-special.yml
+++ b/.github/workflows/build-special.yml
@@ -103,7 +103,7 @@ jobs:
sudo add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main"
fi
- sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
+ sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev ninja-build
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml
index f4442bdde..f0568394f 100644
--- a/.github/workflows/build-windows.yml
+++ b/.github/workflows/build-windows.yml
@@ -34,6 +34,7 @@ jobs:
Debug,
Release
]
+
build_unstable: [ON]
include:
- name: windows-2019-cl
@@ -93,10 +94,23 @@ jobs:
- name: Checkout
uses: actions/checkout@v3
+ - name: Setup msbuild
+ uses: ilammy/msvc-dev-cmd@v1
+ with:
+ arch: x${{ matrix.platform }}
+
- name: Configuration
+ shell: bash
run: |
+ export CMAKE_GENERATOR=Ninja
cmake -E remove_directory build
- cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib"
+ cmake -B build \
+ -S . \
+ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+ -DGTSAM_ALLOW_DEPRECATED_SINCE_V43=OFF \
+ -DBOOST_ROOT="${BOOST_ROOT}" \
+ -DBOOST_INCLUDEDIR="${BOOST_ROOT}\boost\include" \
+ -DBOOST_LIBRARYDIR="${BOOST_ROOT}\lib"
- name: Build
shell: bash
@@ -106,9 +120,6 @@ jobs:
cmake --build build -j4 --config ${{ matrix.build_type }} --target gtsam
cmake --build build -j4 --config ${{ matrix.build_type }} --target gtsam_unstable
- # Target doesn't exist
- # cmake --build build -j4 --config ${{ matrix.build_type }} --target wrap
-
- name: Test
shell: bash
run: |
@@ -116,50 +127,33 @@ jobs:
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.base
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.basis
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.discrete
-
- # Compilation error
- # cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry
-
+ cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.inference
-
- # Compile. Fail with exception
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.linear
-
- cmake --build build -j4 --config ${{ matrix.build_type }} --target check.navigation
+ cmake --build build -j4 --config ${{ matrix.build_type }} --target check.navigation
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.sam
- cmake --build build -j4 --config ${{ matrix.build_type }} --target check.sfm
+ cmake --build build -j4 --config ${{ matrix.build_type }} --target check.sfm
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.symbolic
-
- # Compile. Fail with exception
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.hybrid
-
- # Compile. Fail with exception
- # cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear
-
- # Compilation error
- # cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam
-
+ cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear
+ cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam
# Run GTSAM_UNSTABLE tests
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.base_unstable
-
# Compile. Fail with exception
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry_unstable
-
# Compile. Fail with exception
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.linear_unstable
-
# Compile. Fail with exception
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.discrete_unstable
-
# Compile. Fail with exception
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.dynamics_unstable
-
# Compile. Fail with exception
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear_unstable
-
- # Compilation error
+ # Compile. Fail with exception
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam_unstable
+ # Compile. Fail with exception
+ # cmake --build build -j4 --config ${{ matrix.build_type }} --target check.partition
- # Compilation error
- # cmake --build build -j4 --config ${{ matrix.build_type }} --target check.partition
+ # Run all tests
+ # cmake --build build -j1 --config ${{ matrix.build_type }} --target check
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8b56ea9df..16848a721 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,11 +1,5 @@
cmake_minimum_required(VERSION 3.0)
-# new feature to Cmake Version > 2.8.12
-# Mac ONLY. Define Relative Path on Mac OS
-if(NOT DEFINED CMAKE_MACOSX_RPATH)
- set(CMAKE_MACOSX_RPATH 0)
-endif()
-
# Set the version number for the library
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 3)
@@ -13,18 +7,16 @@ set (GTSAM_VERSION_PATCH 0)
set (GTSAM_PRERELEASE_VERSION "a0")
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
-if (${GTSAM_VERSION_PATCH} EQUAL 0)
- set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}${GTSAM_PRERELEASE_VERSION}")
+if ("${GTSAM_PRERELEASE_VERSION}" STREQUAL "")
+ set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
else()
- set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}${GTSAM_PRERELEASE_VERSION}")
+ set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}${GTSAM_PRERELEASE_VERSION}")
endif()
project(GTSAM
LANGUAGES CXX C
VERSION "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
-message(STATUS "GTSAM Version: ${GTSAM_VERSION_STRING}")
-
set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})
@@ -44,13 +36,6 @@ set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
include(GtsamMakeConfigFile)
include(GNUInstallDirs)
-# Load build type flags and default to Debug mode
-include(GtsamBuildTypes)
-
-# Use macros for creating tests/timing scripts
-include(GtsamTesting)
-include(GtsamPrinting)
-
# guard against in-source builds
if(${GTSAM_SOURCE_DIR} STREQUAL ${GTSAM_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
@@ -58,6 +43,13 @@ endif()
include(cmake/HandleGeneralOptions.cmake) # CMake build options
+# Load build type flags and default to Debug mode
+include(GtsamBuildTypes)
+
+# Use macros for creating tests/timing scripts
+include(GtsamTesting)
+include(GtsamPrinting)
+
############### Decide on BOOST ######################################
# Enable or disable serialization with GTSAM_ENABLE_BOOST_SERIALIZATION
option(GTSAM_ENABLE_BOOST_SERIALIZATION "Enable Boost serialization" ON)
diff --git a/INSTALL.md b/INSTALL.md
index f148e3718..10bee196c 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -182,7 +182,7 @@ Here are some tips to get the best possible performance out of GTSAM.
optimization by 30-50%. Please note that this may not be true for very small
problems where the overhead of dispatching work to multiple threads outweighs
the benefit. We recommend that you benchmark your problem with/without TBB.
-3. Add `-march=native` to `GTSAM_CMAKE_CXX_FLAGS`. A performance gain of
+3. Use `GTSAM_BUILD_WITH_MARCH_NATIVE`. A performance gain of
25-30% can be expected on modern processors. Note that this affects the portability
of your executable. It may not run when copied to another system with older/different
processor architecture.
diff --git a/README.md b/README.md
index 1ccde9738..40b84dff8 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
**As of January 2023, the `develop` branch is officially in "Pre 4.3" mode. We envision several API-breaking changes as we switch to C++17 and away from boost.**
-In addition, features deprecated in 4.2 will be removed. Please use the last [4.2a8 release](https://github.com/borglab/gtsam/releases/tag/4.2a8) if you need those features. However, most are easily converted and can be tracked down (in 4.2) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42`.
+In addition, features deprecated in 4.2 will be removed. Please use the stable [4.2 release](https://github.com/borglab/gtsam/releases/tag/4.2) if you need those features. However, most are easily converted and can be tracked down (in 4.2) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42`.
## What is GTSAM?
diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake
index b24be5f08..2aad58abb 100644
--- a/cmake/GtsamBuildTypes.cmake
+++ b/cmake/GtsamBuildTypes.cmake
@@ -55,9 +55,6 @@ if(NOT CMAKE_BUILD_TYPE AND NOT MSVC AND NOT XCODE_VERSION)
"Choose the type of build, options are: None Debug Release Timing Profiling RelWithDebInfo." FORCE)
endif()
-# Add option for using build type postfixes to allow installing multiple build modes
-option(GTSAM_BUILD_TYPE_POSTFIXES "Enable/Disable appending the build type to the name of compiled libraries" ON)
-
# Define all cache variables, to be populated below depending on the OS/compiler:
set(GTSAM_COMPILE_OPTIONS_PRIVATE "" CACHE INTERNAL "(Do not edit) Private compiler flags for all build configurations." FORCE)
set(GTSAM_COMPILE_OPTIONS_PUBLIC "" CACHE INTERNAL "(Do not edit) Public compiler flags (exported to user projects) for all build configurations." FORCE)
@@ -82,6 +79,13 @@ set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELWITHDEBINFO "NDEBUG" CACHE STRING "(Us
set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELEASE "NDEBUG" CACHE STRING "(User editable) Private preprocessor macros for Release configuration.")
set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_PROFILING "NDEBUG" CACHE STRING "(User editable) Private preprocessor macros for Profiling configuration.")
set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_TIMING "NDEBUG;ENABLE_TIMING" CACHE STRING "(User editable) Private preprocessor macros for Timing configuration.")
+
+mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_DEBUG)
+mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELWITHDEBINFO)
+mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELEASE)
+mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_PROFILING)
+mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_TIMING)
+
if(MSVC)
# Common to all configurations:
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE
@@ -143,6 +147,13 @@ else()
set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING -g -O3 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
endif()
+mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON)
+mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG)
+mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO)
+mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE)
+mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_PROFILING)
+mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING)
+
# Enable C++17:
if (NOT CMAKE_VERSION VERSION_LESS 3.8)
set(GTSAM_COMPILE_FEATURES_PUBLIC "cxx_std_17" CACHE STRING "CMake compile features property for all gtsam targets.")
@@ -198,7 +209,6 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
endif()
if (NOT MSVC)
- option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF)
if(GTSAM_BUILD_WITH_MARCH_NATIVE)
# Check if Apple OS and compiler is [Apple]Clang
if(APPLE AND (${CMAKE_CXX_COMPILER_ID} MATCHES "^(Apple)?Clang$"))
diff --git a/cmake/GtsamTesting.cmake b/cmake/GtsamTesting.cmake
index 573fb696a..47b059213 100644
--- a/cmake/GtsamTesting.cmake
+++ b/cmake/GtsamTesting.cmake
@@ -42,7 +42,7 @@ endmacro()
# GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
#
# Usage example:
-# gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib")
+# gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib" ON)
#
# Arguments:
# globPatterns: The list of files or glob patterns from which to create examples, with
@@ -51,8 +51,9 @@ endmacro()
# excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
# an empty string "" if nothing needs to be excluded.
# linkLibraries: The list of libraries to link to.
-macro(gtsamAddExamplesGlob globPatterns excludedFiles linkLibraries)
- gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "examples" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
+# buildWithAll: Build examples with `make` and/or `make all`
+macro(gtsamAddExamplesGlob globPatterns excludedFiles linkLibraries buildWithAll)
+ gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "examples" ${buildWithAll})
endmacro()
@@ -76,8 +77,9 @@ endmacro()
# excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
# an empty string "" if nothing needs to be excluded.
# linkLibraries: The list of libraries to link to.
-macro(gtsamAddTimingGlob globPatterns excludedFiles linkLibraries)
- gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "timing" ${GTSAM_BUILD_TIMING_ALWAYS})
+# buildWithAll: Build timing scripts with `make` and/or `make all`
+macro(gtsamAddTimingGlob globPatterns excludedFiles linkLibraries buildWithAll)
+ gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "timing" ${buildWithAll})
endmacro()
@@ -86,9 +88,8 @@ endmacro()
# Build macros for using tests
enable_testing()
-option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON)
-option(GTSAM_BUILD_EXAMPLES_ALWAYS "Build examples with 'make all' (build with 'make examples' if not)" ON)
-option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not" OFF)
+#TODO(Varun) Move to HandlePrintConfiguration.cmake. This will require additional changes.
+option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON)
# Add option for combining unit tests
if(MSVC OR XCODE_VERSION)
@@ -123,6 +124,7 @@ add_custom_target(timing)
# Implementations of this file's macros:
macro(gtsamAddTestsGlob_impl groupName globPatterns excludedFiles linkLibraries)
+ #TODO(Varun) Building of tests should not depend on global gtsam flag
if(GTSAM_BUILD_TESTS)
# Add group target if it doesn't already exist
if(NOT TARGET check.${groupName})
diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake
index af75618c8..8c56ae242 100644
--- a/cmake/HandleGeneralOptions.cmake
+++ b/cmake/HandleGeneralOptions.cmake
@@ -8,6 +8,18 @@ else()
set(GTSAM_UNSTABLE_AVAILABLE 0)
endif()
+### GtsamTesting related options
+option(GTSAM_BUILD_EXAMPLES_ALWAYS "Build examples with 'make all' (build with 'make examples' if not)" ON)
+option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not)" OFF)
+###
+
+# Add option for using build type postfixes to allow installing multiple build modes
+option(GTSAM_BUILD_TYPE_POSTFIXES "Enable/Disable appending the build type to the name of compiled libraries" ON)
+
+if (NOT MSVC)
+ option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF)
+endif()
+
# Configurable Options
if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
diff --git a/cmake/README.html b/cmake/README.html
index 8170cd489..9cdb5c758 100644
--- a/cmake/README.html
+++ b/cmake/README.html
@@ -47,9 +47,9 @@ linkLibraries: The list of libraries to link to in addition to CppUnitLite.
-gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries) Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are build with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
+gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries buildWithAll) Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are build with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
Usage example:
-gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib")
+gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib" ON)
Arguments:
globPatterns: The list of files or glob patterns from which to create unit tests, with
@@ -58,6 +58,7 @@ linkLibraries: The list of libraries to link to in addition to CppUnitLite.
excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
an empty string "" if nothing needs to be excluded.
linkLibraries: The list of libraries to link to.
+buildWithAll: Build examples with `make` and/or `make all`
diff --git a/cmake/README.md b/cmake/README.md
index 569a401b1..4203b8d3c 100644
--- a/cmake/README.md
+++ b/cmake/README.md
@@ -52,11 +52,11 @@ Defines two useful functions for creating CTest unit tests. Also immediately cr
Pass an empty string "" if nothing needs to be excluded.
linkLibraries: The list of libraries to link to in addition to CppUnitLite.
-* `gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries)` Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are build with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
+* `gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries buildWithAll)` Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are build with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
Usage example:
- gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib")
+ gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib" ON)
Arguments:
@@ -66,6 +66,7 @@ Defines two useful functions for creating CTest unit tests. Also immediately cr
excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
an empty string "" if nothing needs to be excluded.
linkLibraries: The list of libraries to link to.
+ buildWithAll: Build examples with `make` and/or `make all`
## GtsamMakeConfigFile
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index f0975821f..05e0a13ef 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -56,7 +56,7 @@ if (GTSAM_BUILD_DOCS)
if (GTSAM_BUILD_UNSTABLE)
list(APPEND doc_subdirs ${gtsam_unstable_doc_subdirs})
endif()
- if (GTSAM_BUILD_EXAMPLES)
+ if (GTSAM_BUILD_EXAMPLES_ALWAYS)
list(APPEND doc_subdirs examples)
endif()
diff --git a/doc/Hybrid.lyx b/doc/Hybrid.lyx
index c854a4845..44df81e39 100644
--- a/doc/Hybrid.lyx
+++ b/doc/Hybrid.lyx
@@ -86,7 +86,7 @@ Hybrid Inference
\end_layout
\begin_layout Author
-Frank Dellaert & Varun Agrawal
+Frank Dellaert
\end_layout
\begin_layout Date
@@ -264,12 +264,110 @@ If we take the log of
we get
\begin_inset Formula
\begin{equation}
-\log P(x|y,m)=\log P_{m}(x|y)=K_{gcm}-E_{gcm}(x,y).\label{eq:gm_log}
+\log P(x|y,m)=\log P_{m}(x|y)=K_{gc}(m)-E_{gcm}(x,y).\label{eq:gm_log}
\end{equation}
\end_inset
-Equating
+
+\end_layout
+
+\begin_layout Standard
+\noindent
+For conciseness, we will write
+\begin_inset Formula $K_{gc}(m)$
+\end_inset
+
+ as
+\begin_inset Formula $K_{gcm}$
+\end_inset
+
+.
+\end_layout
+
+\begin_layout Standard
+\SpecialChar allowbreak
+
+\end_layout
+
+\begin_layout Standard
+\noindent
+The key point here is that
+\family roman
+\series medium
+\shape up
+\size normal
+\emph off
+\bar no
+\strikeout off
+\xout off
+\uuline off
+\uwave off
+\noun off
+\color none
+
+\begin_inset Formula $K_{gm}$
+\end_inset
+
+
+\family default
+\series default
+\shape default
+\size default
+\emph default
+\bar default
+\strikeout default
+\xout default
+\uuline default
+\uwave default
+\noun default
+\color inherit
+ is the log-normalization constant for the complete
+\emph on
+GaussianMixture
+\emph default
+ across all values of
+\begin_inset Formula $m$
+\end_inset
+
+, and cannot be dependent on the value of
+\begin_inset Formula $m$
+\end_inset
+
+.
+ In contrast,
+\begin_inset Formula $K_{gcm}$
+\end_inset
+
+ is the log-normalization constant for a specific
+\emph on
+GaussianConditional
+\emph default
+ mode (thus dependent on
+\begin_inset Formula $m$
+\end_inset
+
+) and can have differing values based on the covariance matrices for each
+ mode.
+ Thus to obtain a constant
+\begin_inset Formula $K_{gm}$
+\end_inset
+
+ which satisfies the invariant, we need to specify
+\begin_inset Formula $E_{gm}(x,y,m)$
+\end_inset
+
+ accordingly.
+\end_layout
+
+\begin_layout Standard
+\SpecialChar allowbreak
+
+\end_layout
+
+\begin_layout Standard
+\noindent
+By equating
\begin_inset CommandInset ref
LatexCommand eqref
reference "eq:gm_invariant"
@@ -289,7 +387,7 @@ noprefix "false"
\end_inset
- we see that this can be achieved by defining the error
+, we see that this can be achieved by defining the error
\begin_inset Formula $E_{gm}(x,y,m)$
\end_inset
@@ -541,9 +639,9 @@ noprefix "false"
:
\begin_inset Formula
-\[
-E_{mf}(y,m)=C+E_{gcm}(\bar{x},y)-K_{gcm}.
-\]
+\begin{equation}
+E_{mf}(y,m)=C+E_{gcm}(\bar{x},y)-K_{gcm}\label{eq:mixture_factor}
+\end{equation}
\end_inset
@@ -555,8 +653,25 @@ Unfortunately, we can no longer choose
\begin_inset Formula $m$
\end_inset
- to make the constant disappear.
- There are two possibilities:
+ to make the constant disappear, since
+\begin_inset Formula $C$
+\end_inset
+
+ has to be a constant applicable across all
+\begin_inset Formula $m$
+\end_inset
+
+.
+\end_layout
+
+\begin_layout Standard
+\SpecialChar allowbreak
+
+\end_layout
+
+\begin_layout Standard
+\noindent
+There are two possibilities:
\end_layout
\begin_layout Enumerate
diff --git a/doc/Hybrid.pdf b/doc/Hybrid.pdf
index 558be2902..182573412 100644
Binary files a/doc/Hybrid.pdf and b/doc/Hybrid.pdf differ
diff --git a/docker/ubuntu-gtsam-python/Dockerfile b/docker/ubuntu-gtsam-python/Dockerfile
index 85eed4d4e..4a7c4b37f 100644
--- a/docker/ubuntu-gtsam-python/Dockerfile
+++ b/docker/ubuntu-gtsam-python/Dockerfile
@@ -6,9 +6,6 @@ FROM borglab/ubuntu-gtsam:bionic
# Install pip
RUN apt-get install -y python3-pip python3-dev
-# Install python wrapper requirements
-RUN python3 -m pip install -U -r /usr/src/gtsam/python/requirements.txt
-
# Run cmake again, now with python toolbox on
WORKDIR /usr/src/gtsam/build
RUN cmake \
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 52d90deb9..280b82265 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -20,4 +20,4 @@ if (NOT GTSAM_USE_BOOST_FEATURES)
)
endif()
-gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;${Boost_PROGRAM_OPTIONS_LIBRARY}")
+gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;${Boost_PROGRAM_OPTIONS_LIBRARY}" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
diff --git a/examples/GNCExample.cpp b/examples/GNCExample.cpp
new file mode 100644
index 000000000..8a4f0706f
--- /dev/null
+++ b/examples/GNCExample.cpp
@@ -0,0 +1,75 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GNCExample.cpp
+ * @brief Simple example showcasing a Graduated Non-Convexity based solver
+ * @author Achintya Mohan
+ */
+
+/**
+ * A simple 2D pose graph optimization example
+ * - The robot is initially at origin (0.0, 0.0, 0.0)
+ * - We have full odometry measurements for 2 motions
+ * - The robot first moves to (1.0, 0.0, 0.1) and then to (1.0, 1.0, 0.2)
+ */
+
+#include <gtsam/geometry/Pose2.h>
+#include <gtsam/linear/NoiseModel.h>
+#include <gtsam/nonlinear/GncOptimizer.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <gtsam/nonlinear/Values.h>
+#include <gtsam/slam/BetweenFactor.h>
+
+#include <iostream>
+
+using namespace std;
+using namespace gtsam;
+
+int main() {
+ cout << "Graduated Non-Convexity Example\n";
+
+ NonlinearFactorGraph graph;
+
+ // Add a prior to the first point, set to the origin
+ auto priorNoise = noiseModel::Isotropic::Sigma(3, 0.1);
+ graph.addPrior(1, Pose2(0.0, 0.0, 0.0), priorNoise);
+
+ // Add additional factors, noise models must be Gaussian
+ Pose2 x1(1.0, 0.0, 0.1);
+ graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, x1, noiseModel::Isotropic::Sigma(3, 0.2));
+ Pose2 x2(0.0, 1.0, 0.1);
+ graph.emplace_shared<BetweenFactor<Pose2>>(2, 3, x2, noiseModel::Isotropic::Sigma(3, 0.4));
+
+ // Initial estimates
+ Values initial;
+ initial.insert(1, Pose2(0.2, 0.5, -0.1));
+ initial.insert(2, Pose2(0.8, 0.3, 0.1));
+ initial.insert(3, Pose2(0.8, 0.2, 0.3));
+
+ // Set options for the non-minimal solver
+ LevenbergMarquardtParams lmParams;
+ lmParams.setMaxIterations(1000);
+ lmParams.setRelativeErrorTol(1e-5);
+
+ // Set GNC-specific options
+ GncParams<LevenbergMarquardtParams> gncParams(lmParams);
+ gncParams.setLossType(GncLossType::TLS);
+
+ // Optimize the graph and print results
+ GncOptimizer<GncParams<LevenbergMarquardtParams>> optimizer(graph, initial, gncParams);
+ Values result = optimizer.optimize();
+ result.print("Final Result:");
+
+ return 0;
+}
+
diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt
index efd0e9dc2..cb87a6bcd 100644
--- a/gtsam/CMakeLists.txt
+++ b/gtsam/CMakeLists.txt
@@ -112,7 +112,7 @@ if(GTSAM_SUPPORT_NESTED_DISSECTION)
endif()
# Versions
-set(gtsam_version ${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH})
+set(gtsam_version ${GTSAM_VERSION_STRING})
set(gtsam_soversion ${GTSAM_VERSION_MAJOR})
message(STATUS "GTSAM Version: ${gtsam_version}")
message(STATUS "Install prefix: ${CMAKE_INSTALL_PREFIX}")
@@ -145,10 +145,6 @@ if (GTSAM_USE_EIGEN_MKL)
target_include_directories(gtsam PUBLIC ${MKL_INCLUDE_DIR})
endif()
-if(GTSAM_USE_TBB)
- target_include_directories(gtsam PUBLIC ${TBB_INCLUDE_DIRS})
-endif()
-
# Add includes for source directories 'BEFORE' boost and any system include
# paths so that the compiler uses GTSAM headers in our source directory instead
# of any previously installed GTSAM headers.
diff --git a/gtsam/base/serialization.h b/gtsam/base/serialization.h
index e615afe83..18612bc22 100644
--- a/gtsam/base/serialization.h
+++ b/gtsam/base/serialization.h
@@ -17,6 +17,7 @@
* @date Feb 7, 2012
*/
+#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
#pragma once
#include
@@ -270,3 +271,4 @@ void deserializeBinary(const std::string& serialized, T& output,
///@}
} // namespace gtsam
+#endif
diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h
index 20ab4bd68..9f55f3b63 100644
--- a/gtsam/discrete/AlgebraicDecisionTree.h
+++ b/gtsam/discrete/AlgebraicDecisionTree.h
@@ -36,7 +36,7 @@ namespace gtsam {
* @ingroup discrete
*/
   template<typename L>
-  class GTSAM_EXPORT AlgebraicDecisionTree : public DecisionTree<L, double> {
+  class AlgebraicDecisionTree : public DecisionTree<L, double> {
/**
* @brief Default method used by `labelFormatter` or `valueFormatter` when
* printing.
diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp
index 17731453a..7202e6853 100644
--- a/gtsam/discrete/DecisionTreeFactor.cpp
+++ b/gtsam/discrete/DecisionTreeFactor.cpp
@@ -164,15 +164,23 @@ namespace gtsam {
}
// create new factor, note we collect keys that are not in frontalKeys
- // TODO(frank): why do we need this??? result should contain correct keys!!!
+ /*
+ Due to branch merging, the labels in `result` may be missing some keys
+ E.g. After branch merging, we may get a ADT like:
+ Leaf [2] 1.0204082
+
+ This is missing the key values used for branching.
+ */
+ KeyVector difference, frontalKeys_(frontalKeys), keys_(keys());
+ // Get the difference of the frontalKeys and the factor keys using set_difference
+ std::sort(keys_.begin(), keys_.end());
+ std::sort(frontalKeys_.begin(), frontalKeys_.end());
+ std::set_difference(keys_.begin(), keys_.end(), frontalKeys_.begin(),
+ frontalKeys_.end(), back_inserter(difference));
+
DiscreteKeys dkeys;
- for (i = 0; i < keys().size(); i++) {
- Key j = keys()[i];
- // TODO(frank): inefficient!
- if (std::find(frontalKeys.begin(), frontalKeys.end(), j) !=
- frontalKeys.end())
- continue;
- dkeys.push_back(DiscreteKey(j, cardinality(j)));
+ for (Key key : difference) {
+ dkeys.push_back(DiscreteKey(key, cardinality(key)));
}
return std::make_shared(dkeys, result);
}
diff --git a/gtsam/discrete/DiscreteBayesTree.h b/gtsam/discrete/DiscreteBayesTree.h
index 34f8f5d4a..89baf49a9 100644
--- a/gtsam/discrete/DiscreteBayesTree.h
+++ b/gtsam/discrete/DiscreteBayesTree.h
@@ -110,4 +110,12 @@ class GTSAM_EXPORT DiscreteBayesTree
/// @}
};
+/// traits
+template <>
+struct traits<DiscreteBayesTreeClique>
+    : public Testable<DiscreteBayesTreeClique> {};
+
+template <>
+struct traits<DiscreteBayesTree> : public Testable<DiscreteBayesTree> {};
+
} // namespace gtsam
diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h
index 183cf8561..50fa6e161 100644
--- a/gtsam/discrete/DiscreteConditional.h
+++ b/gtsam/discrete/DiscreteConditional.h
@@ -77,6 +77,18 @@ class GTSAM_EXPORT DiscreteConditional
const Signature::Table& table)
: DiscreteConditional(Signature(key, parents, table)) {}
+ /**
+ * Construct from key, parents, and a vector specifying the
+ * conditional probability table (CPT) in 00 01 10 11 order. For
+ * three-valued, it would be 00 01 02 10 11 12 20 21 22, etc....
+ *
+ * Example: DiscreteConditional P(D, {B,E}, table);
+ */
+ DiscreteConditional(const DiscreteKey& key, const DiscreteKeys& parents,
+                      const std::vector<double>& table)
+ : DiscreteConditional(1, DiscreteKeys{key} & parents,
+ ADT(DiscreteKeys{key} & parents, table)) {}
+
/**
* Construct from key, parents, and a string specifying the conditional
* probability table (CPT) in 00 01 10 11 order. For three-valued, it would
diff --git a/gtsam/discrete/DiscreteKey.cpp b/gtsam/discrete/DiscreteKey.cpp
index 79d11d8a7..17f233695 100644
--- a/gtsam/discrete/DiscreteKey.cpp
+++ b/gtsam/discrete/DiscreteKey.cpp
@@ -49,6 +49,7 @@ namespace gtsam {
void DiscreteKeys::print(const std::string& s,
const KeyFormatter& keyFormatter) const {
+ std::cout << (s.empty() ? "" : s + " ") << std::endl;
for (auto&& dkey : *this) {
std::cout << DefaultKeyFormatter(dkey.first) << " " << dkey.second
<< std::endl;
diff --git a/gtsam/discrete/DiscreteKey.h b/gtsam/discrete/DiscreteKey.h
index 3a626c6b3..44cc192ef 100644
--- a/gtsam/discrete/DiscreteKey.h
+++ b/gtsam/discrete/DiscreteKey.h
@@ -74,6 +74,12 @@ namespace gtsam {
return *this;
}
+ /// Add multiple keys (non-const!)
+ DiscreteKeys& operator&(const DiscreteKeys& keys) {
+ this->insert(this->end(), keys.begin(), keys.end());
+ return *this;
+ }
+
/// Print the keys and cardinalities.
void print(const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i
index fe8cbc7f3..a1731f8e5 100644
--- a/gtsam/discrete/discrete.i
+++ b/gtsam/discrete/discrete.i
@@ -280,11 +280,11 @@ class DiscreteLookupDAG {
};
#include
-std::pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
+pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
 EliminateDiscrete(const gtsam::DiscreteFactorGraph& factors,
                   const gtsam::Ordering& frontalKeys);
-std::pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
+pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
EliminateForMPE(const gtsam::DiscreteFactorGraph& factors,
const gtsam::Ordering& frontalKeys);
diff --git a/gtsam/discrete/tests/testDiscreteConditional.cpp b/gtsam/discrete/tests/testDiscreteConditional.cpp
index dae279713..f2c6d7b9f 100644
--- a/gtsam/discrete/tests/testDiscreteConditional.cpp
+++ b/gtsam/discrete/tests/testDiscreteConditional.cpp
@@ -48,6 +48,11 @@ TEST(DiscreteConditional, constructors) {
DiscreteConditional actual2(1, f2);
DecisionTreeFactor expected2 = f2 / *f2.sum(1);
   EXPECT(assert_equal(expected2, static_cast<DecisionTreeFactor>(actual2)));
+
+  std::vector<double> probs{0.2, 0.5, 0.3, 0.6, 0.4, 0.7, 0.25, 0.55, 0.35, 0.65, 0.45, 0.75};
+  DiscreteConditional actual3(X, {Y, Z}, probs);
+  DecisionTreeFactor expected3 = f2;
+  EXPECT(assert_equal(expected3, static_cast<DecisionTreeFactor>(actual3)));
}
/* ************************************************************************* */
diff --git a/gtsam/discrete/tests/testDiscreteMarginals.cpp b/gtsam/discrete/tests/testDiscreteMarginals.cpp
index ad9b1b191..76bed2fd6 100644
--- a/gtsam/discrete/tests/testDiscreteMarginals.cpp
+++ b/gtsam/discrete/tests/testDiscreteMarginals.cpp
@@ -121,7 +121,7 @@ TEST_UNSAFE( DiscreteMarginals, truss ) {
   Clique expected0(std::make_shared<DiscreteConditional>((key[0] | key[2], key[4]) = "2/1 2/1 2/1 2/1"));
Clique::shared_ptr actual0 = (*bayesTree)[0];
-// EXPECT(assert_equal(expected0, *actual0)); // TODO, correct but fails
+ EXPECT(assert_equal(expected0, *actual0));
   Clique expected1(std::make_shared<DiscreteConditional>((key[1] | key[3], key[4]) = "1/2 1/2 1/2 1/2"));
Clique::shared_ptr actual1 = (*bayesTree)[1];
diff --git a/gtsam/geometry/CameraSet.h b/gtsam/geometry/CameraSet.h
index 23a4b467e..cf4beb883 100644
--- a/gtsam/geometry/CameraSet.h
+++ b/gtsam/geometry/CameraSet.h
@@ -131,12 +131,15 @@ class CameraSet : public std::vector> {
return z;
}
- /** An overload o the project2 function to accept
+ /** An overload of the project2 function to accept
* full matrices and vectors and pass it to the pointer
- * version of the function
+ * version of the function.
+ *
+ * Use SFINAE to resolve overload ambiguity.
*/
   template<class POINT, class... OptArgs>
- ZVector project2(const POINT& point, OptArgs&... args) const {
+ typename std::enable_if<(sizeof...(OptArgs) != 0), ZVector>::type project2(
+ const POINT& point, OptArgs&... args) const {
// pass it to the pointer version of the function
return project2(point, (&args)...);
}
@@ -435,8 +438,7 @@ class CameraSet : public std::vector> {
// (DxD) += (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
// add contribution of current factor
- // TODO(gareth): Eigen doesn't let us pass the expression. Call eval() for
- // now...
+ // Eigen doesn't let us pass the expression so we call eval()
augmentedHessian.updateDiagonalBlock(
aug_i,
((FiT *
diff --git a/gtsam/geometry/PinholePose.h b/gtsam/geometry/PinholePose.h
index 5cad3b6e7..df6ec5c08 100644
--- a/gtsam/geometry/PinholePose.h
+++ b/gtsam/geometry/PinholePose.h
@@ -332,7 +332,8 @@ public:
}
/// stream operator
- friend std::ostream& operator<<(std::ostream &os, const PinholePose& camera) {
+ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
+ const PinholePose& camera) {
os << "{R: " << camera.pose().rotation().rpy().transpose();
os << ", t: " << camera.pose().translation().transpose();
if (!camera.K_) os << ", K: none";
diff --git a/gtsam/geometry/Similarity2.h b/gtsam/geometry/Similarity2.h
index 47281b383..6bec716ea 100644
--- a/gtsam/geometry/Similarity2.h
+++ b/gtsam/geometry/Similarity2.h
@@ -74,9 +74,10 @@ class GTSAM_EXPORT Similarity2 : public LieGroup {
bool operator==(const Similarity2& other) const;
/// Print with optional string
- void print(const std::string& s) const;
+ void print(const std::string& s = "") const;
- friend std::ostream& operator<<(std::ostream& os, const Similarity2& p);
+ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
+ const Similarity2& p);
/// @}
/// @name Group
diff --git a/gtsam/geometry/Similarity3.cpp b/gtsam/geometry/Similarity3.cpp
index a9a369e44..cf2360b08 100644
--- a/gtsam/geometry/Similarity3.cpp
+++ b/gtsam/geometry/Similarity3.cpp
@@ -171,7 +171,7 @@ Similarity3 Similarity3::Align(const Point3Pairs &abPointPairs) {
return internal::align(d_abPointPairs, aRb, centroids);
}
-Similarity3 Similarity3::Align(const vector<Pose3Pair> &abPosePairs) {
+Similarity3 Similarity3::Align(const Pose3Pairs &abPosePairs) {
const size_t n = abPosePairs.size();
if (n < 2)
throw std::runtime_error("input should have at least 2 pairs of poses");
diff --git a/gtsam/geometry/Similarity3.h b/gtsam/geometry/Similarity3.h
index aa0f3a62a..cd4af89bc 100644
--- a/gtsam/geometry/Similarity3.h
+++ b/gtsam/geometry/Similarity3.h
@@ -75,9 +75,10 @@ class GTSAM_EXPORT Similarity3 : public LieGroup {
bool operator==(const Similarity3& other) const;
/// Print with optional string
- void print(const std::string& s) const;
+ void print(const std::string& s = "") const;
- friend std::ostream& operator<<(std::ostream& os, const Similarity3& p);
+ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
+ const Similarity3& p);
/// @}
/// @name Group
@@ -132,7 +133,7 @@ class GTSAM_EXPORT Similarity3 : public LieGroup {
* computed using the algorithm described here:
* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
*/
-  static Similarity3 Align(const std::vector<Pose3Pair>& abPosePairs);
+ static Similarity3 Align(const Pose3Pairs& abPosePairs);
/// @}
/// @name Lie Group
diff --git a/gtsam/geometry/SphericalCamera.h b/gtsam/geometry/SphericalCamera.h
index a9c38cd69..ef20aa7fe 100644
--- a/gtsam/geometry/SphericalCamera.h
+++ b/gtsam/geometry/SphericalCamera.h
@@ -238,9 +238,9 @@ class GTSAM_EXPORT SphericalCamera {
// end of class SphericalCamera
template <>
-struct traits<SphericalCamera> : public internal::LieGroup<SphericalCamera> {};
+struct traits<SphericalCamera> : public internal::Manifold<SphericalCamera> {};
 template <>
-struct traits<const SphericalCamera> : public internal::LieGroup<SphericalCamera> {};
+struct traits<const SphericalCamera> : public internal::Manifold<SphericalCamera> {};
} // namespace gtsam
diff --git a/gtsam/geometry/Unit3.h b/gtsam/geometry/Unit3.h
index 72dd49c29..18bc5d9f0 100644
--- a/gtsam/geometry/Unit3.h
+++ b/gtsam/geometry/Unit3.h
@@ -105,7 +105,8 @@ public:
/// @name Testable
/// @{
- friend std::ostream& operator<<(std::ostream& os, const Unit3& pair);
+ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
+ const Unit3& pair);
/// The print fuction
void print(const std::string& s = std::string()) const;
diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i
index 57f2acffe..01161817b 100644
--- a/gtsam/geometry/geometry.i
+++ b/gtsam/geometry/geometry.i
@@ -1082,6 +1082,7 @@ class Similarity2 {
// Standard Interface
bool equals(const gtsam::Similarity2& sim, double tol) const;
+ void print(const std::string& s = "") const;
Matrix matrix() const;
gtsam::Rot2& rotation();
gtsam::Point2& translation();
@@ -1105,6 +1106,7 @@ class Similarity3 {
// Standard Interface
bool equals(const gtsam::Similarity3& sim, double tol) const;
+ void print(const std::string& s = "") const;
Matrix matrix() const;
gtsam::Rot3& rotation();
gtsam::Point3& translation();
@@ -1180,11 +1182,13 @@ class TriangulationParameters {
bool enableEPI;
double landmarkDistanceThreshold;
double dynamicOutlierRejectionThreshold;
+ bool useLOST;
gtsam::SharedNoiseModel noiseModel;
TriangulationParameters(const double rankTolerance = 1.0,
const bool enableEPI = false,
double landmarkDistanceThreshold = -1,
double dynamicOutlierRejectionThreshold = -1,
+ const bool useLOST = false,
const gtsam::SharedNoiseModel& noiseModel = nullptr);
};
diff --git a/gtsam/geometry/tests/testSphericalCamera.cpp b/gtsam/geometry/tests/testSphericalCamera.cpp
index 4bc851f35..cf8970dc4 100644
--- a/gtsam/geometry/tests/testSphericalCamera.cpp
+++ b/gtsam/geometry/tests/testSphericalCamera.cpp
@@ -24,6 +24,13 @@
#include
#include
+#if defined(__i686__) || defined(__i386__)
+// See issue discussion: https://github.com/borglab/gtsam/issues/1605
+constexpr double TEST_THRESHOLD = 1e-5;
+#else
+constexpr double TEST_THRESHOLD = 1e-7;
+#endif
+
using namespace std::placeholders;
using namespace std;
using namespace gtsam;
@@ -104,8 +111,8 @@ TEST(SphericalCamera, Dproject) {
Matrix numerical_pose = numericalDerivative21(project3, pose, point1);
Matrix numerical_point = numericalDerivative22(project3, pose, point1);
EXPECT(assert_equal(bearing1, result));
- EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
- EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
+ EXPECT(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
+ EXPECT(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
}
/* ************************************************************************* */
@@ -123,8 +130,8 @@ TEST(SphericalCamera, reprojectionError) {
Matrix numerical_point =
numericalDerivative32(reprojectionError2, pose, point1, bearing1);
EXPECT(assert_equal(Vector2(0.0, 0.0), result));
- EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
- EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
+ EXPECT(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
+ EXPECT(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
}
/* ************************************************************************* */
@@ -137,9 +144,9 @@ TEST(SphericalCamera, reprojectionError_noisy) {
numericalDerivative31(reprojectionError2, pose, point1, bearing_noisy);
Matrix numerical_point =
numericalDerivative32(reprojectionError2, pose, point1, bearing_noisy);
- EXPECT(assert_equal(Vector2(-0.050282, 0.00833482), result, 1e-5));
- EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
- EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
+ EXPECT(assert_equal(Vector2(-0.050282, 0.00833482), result, 1e2*TEST_THRESHOLD));
+ EXPECT(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
+ EXPECT(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
}
/* ************************************************************************* */
@@ -151,8 +158,8 @@ TEST(SphericalCamera, Dproject2) {
camera.project2(point1, Dpose, Dpoint);
Matrix numerical_pose = numericalDerivative21(project3, pose1, point1);
Matrix numerical_point = numericalDerivative22(project3, pose1, point1);
- CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
- CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
+ CHECK(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
+ CHECK(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
}
/* ************************************************************************* */
diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h
index 3a8398804..d82da3459 100644
--- a/gtsam/geometry/triangulation.h
+++ b/gtsam/geometry/triangulation.h
@@ -574,6 +574,11 @@ struct GTSAM_EXPORT TriangulationParameters {
*/
double dynamicOutlierRejectionThreshold;
+ /**
+ * if true, will use the LOST algorithm instead of DLT
+ */
+ bool useLOST;
+
SharedNoiseModel noiseModel; ///< used in the nonlinear triangulation
/**
@@ -588,10 +593,12 @@ struct GTSAM_EXPORT TriangulationParameters {
TriangulationParameters(const double _rankTolerance = 1.0,
const bool _enableEPI = false, double _landmarkDistanceThreshold = -1,
double _dynamicOutlierRejectionThreshold = -1,
+ const bool _useLOST = false,
const SharedNoiseModel& _noiseModel = nullptr) :
rankTolerance(_rankTolerance), enableEPI(_enableEPI), //
landmarkDistanceThreshold(_landmarkDistanceThreshold), //
dynamicOutlierRejectionThreshold(_dynamicOutlierRejectionThreshold),
+ useLOST(_useLOST),
noiseModel(_noiseModel){
}
@@ -604,6 +611,7 @@ struct GTSAM_EXPORT TriangulationParameters {
<< std::endl;
os << "dynamicOutlierRejectionThreshold = "
<< p.dynamicOutlierRejectionThreshold << std::endl;
+ os << "useLOST = " << p.useLOST << std::endl;
os << "noise model" << std::endl;
return os;
}
@@ -665,8 +673,8 @@ class TriangulationResult : public std::optional {
return value();
}
// stream to output
- friend std::ostream& operator<<(std::ostream& os,
- const TriangulationResult& result) {
+ GTSAM_EXPORT friend std::ostream& operator<<(
+ std::ostream& os, const TriangulationResult& result) {
if (result)
os << "point = " << *result << std::endl;
else
@@ -701,7 +709,7 @@ TriangulationResult triangulateSafe(const CameraSet& cameras,
try {
Point3 point =
triangulatePoint3(cameras, measured, params.rankTolerance,
- params.enableEPI, params.noiseModel);
+ params.enableEPI, params.noiseModel, params.useLOST);
// Check landmark distance and re-projection errors to avoid outliers
size_t i = 0;
diff --git a/gtsam/hybrid/HybridBayesTree.h b/gtsam/hybrid/HybridBayesTree.h
index bc2a0f852..f91e16cbf 100644
--- a/gtsam/hybrid/HybridBayesTree.h
+++ b/gtsam/hybrid/HybridBayesTree.h
@@ -123,6 +123,10 @@ class GTSAM_EXPORT HybridBayesTree : public BayesTree {
};
/// traits
+template <>
+struct traits<HybridBayesTreeClique> : public Testable<HybridBayesTreeClique> {
+};
+
template <>
 struct traits<HybridBayesTree> : public Testable<HybridBayesTree> {};
diff --git a/gtsam/hybrid/HybridGaussianISAM.h b/gtsam/hybrid/HybridGaussianISAM.h
index 8578f3dc5..ba2de52d2 100644
--- a/gtsam/hybrid/HybridGaussianISAM.h
+++ b/gtsam/hybrid/HybridGaussianISAM.h
@@ -29,7 +29,8 @@
namespace gtsam {
/**
- * @brief
+ * @brief Incremental Smoothing and Mapping (ISAM) algorithm
+ * for hybrid factor graphs.
*
* @ingroup hybrid
*/
diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp
index 2459e4ec9..e51adb9cd 100644
--- a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp
+++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp
@@ -56,8 +56,6 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize(
for (auto& f : factors_) {
// First check if it is a valid factor
if (!f) {
- // TODO(dellaert): why?
- linearFG->push_back(GaussianFactor::shared_ptr());
continue;
}
// Check if it is a nonlinear mixture factor
diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
index df38c171e..b240e1626 100644
--- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
@@ -42,8 +42,8 @@
#include
#include
#include
-#include
#include
+#include
#include "Switching.h"
#include "TinyHybridExample.h"
@@ -613,11 +613,11 @@ TEST(HybridGaussianFactorGraph, assembleGraphTree) {
// Create expected decision tree with two factor graphs:
// Get mixture factor:
-  auto mixture = std::dynamic_pointer_cast<GaussianMixtureFactor>(fg.at(0));
+  auto mixture = fg.at<GaussianMixtureFactor>(0);
   CHECK(mixture);
 
   // Get prior factor:
-  const auto gf = std::dynamic_pointer_cast<HybridConditional>(fg.at(1));
+  const auto gf = fg.at<HybridConditional>(1);
CHECK(gf);
using GF = GaussianFactor::shared_ptr;
const GF prior = gf->asGaussian();
diff --git a/gtsam/inference/FactorGraph.h b/gtsam/inference/FactorGraph.h
index 327fca49a..b6046d0bb 100644
--- a/gtsam/inference/FactorGraph.h
+++ b/gtsam/inference/FactorGraph.h
@@ -310,6 +310,21 @@ class FactorGraph {
*/
sharedFactor& at(size_t i) { return factors_.at(i); }
+ /** Get a specific factor by index and typecast to factor type F
+ * (this checks array bounds and may throw
+ * an exception, as opposed to operator[] which does not).
+ */
+  template <typename F>
+  std::shared_ptr<F> at(size_t i) {
+    return std::dynamic_pointer_cast<F>(factors_.at(i));
+  }
+
+  /// Const version of templated `at` method.
+  template <typename F>
+  const std::shared_ptr<F> at(size_t i) const {
+    return std::dynamic_pointer_cast<F>(factors_.at(i));
+ }
+
/** Get a specific factor by index (this does not check array bounds, as
* opposed to at() which does).
*/
diff --git a/gtsam/inference/Key.cpp b/gtsam/inference/Key.cpp
index 8fcea0d05..15d633eeb 100644
--- a/gtsam/inference/Key.cpp
+++ b/gtsam/inference/Key.cpp
@@ -10,7 +10,7 @@
* -------------------------------------------------------------------------- */
/**
- * @file Key.h
+ * @file Key.cpp
* @brief
* @author Richard Roberts
* @author Alex Cunningham
@@ -26,6 +26,9 @@ using namespace std;
namespace gtsam {
+/// Assign default key formatter
+KeyFormatter DefaultKeyFormatter = &_defaultKeyFormatter;
+
/* ************************************************************************* */
string _defaultKeyFormatter(Key key) {
const Symbol asSymbol(key);
diff --git a/gtsam/inference/Key.h b/gtsam/inference/Key.h
index 31428a50e..a02d018f5 100644
--- a/gtsam/inference/Key.h
+++ b/gtsam/inference/Key.h
@@ -37,10 +37,16 @@ using KeyFormatter = std::function;
// Helper function for DefaultKeyFormatter
GTSAM_EXPORT std::string _defaultKeyFormatter(Key key);
-/// The default KeyFormatter, which is used if no KeyFormatter is passed to
-/// a nonlinear 'print' function. Automatically detects plain integer keys
-/// and Symbol keys.
-static const KeyFormatter DefaultKeyFormatter = &_defaultKeyFormatter;
+/**
+ * The default KeyFormatter, which is used if no KeyFormatter is passed
+ * to a 'print' function.
+ *
+ * Automatically detects plain integer keys and Symbol keys.
+ *
+ * Marked as `extern` so that it can be updated by external libraries.
+ *
+ */
+extern GTSAM_EXPORT KeyFormatter DefaultKeyFormatter;
// Helper function for Multi-robot Key Formatter
GTSAM_EXPORT std::string _multirobotKeyFormatter(gtsam::Key key);
@@ -124,7 +130,3 @@ struct traits {
};
} // namespace gtsam
-
-
-
-
diff --git a/gtsam/inference/tests/testOrdering.cpp b/gtsam/inference/tests/testOrdering.cpp
index 328d383d8..b6cfcb6ed 100644
--- a/gtsam/inference/tests/testOrdering.cpp
+++ b/gtsam/inference/tests/testOrdering.cpp
@@ -219,6 +219,11 @@ TEST(Ordering, AppendVector) {
Ordering expected{X(0), X(1), X(2)};
EXPECT(assert_equal(expected, actual));
+
+ actual = Ordering();
+ Ordering addl{X(0), X(1), X(2)};
+ actual += addl;
+ EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
diff --git a/gtsam/linear/IterativeSolver.h b/gtsam/linear/IterativeSolver.h
index c4a719436..0441cd9da 100644
--- a/gtsam/linear/IterativeSolver.h
+++ b/gtsam/linear/IterativeSolver.h
@@ -72,8 +72,8 @@ public:
GTSAM_EXPORT virtual void print(std::ostream &os) const;
/* for serialization */
- friend std::ostream& operator<<(std::ostream &os,
- const IterativeOptimizationParameters &p);
+ GTSAM_EXPORT friend std::ostream &operator<<(
+ std::ostream &os, const IterativeOptimizationParameters &p);
GTSAM_EXPORT static Verbosity verbosityTranslator(const std::string &s);
GTSAM_EXPORT static std::string verbosityTranslator(Verbosity v);
diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h
index 375e82698..407ed1e27 100644
--- a/gtsam/linear/JacobianFactor.h
+++ b/gtsam/linear/JacobianFactor.h
@@ -312,6 +312,12 @@ namespace gtsam {
/** Get a view of the A matrix */
ABlock getA() { return Ab_.range(0, size()); }
+ /**
+ * Get a view of the A matrix for the variable
+ * pointed to by the given key.
+ */
+ ABlock getA(const Key& key) { return Ab_(find(key) - begin()); }
+
/** Update an information matrix by adding the information corresponding to this factor
* (used internally during elimination).
* @param scatter A mapping from variable index to slot index in this HessianFactor
diff --git a/gtsam/linear/LossFunctions.cpp b/gtsam/linear/LossFunctions.cpp
index 64ded7cc3..682783e7c 100644
--- a/gtsam/linear/LossFunctions.cpp
+++ b/gtsam/linear/LossFunctions.cpp
@@ -19,6 +19,7 @@
#include
#include
+#include <cmath>
#include
using namespace std;
@@ -424,6 +425,132 @@ L2WithDeadZone::shared_ptr L2WithDeadZone::Create(double k, const ReweightScheme
return shared_ptr(new L2WithDeadZone(k, reweight));
}
+
+/* ************************************************************************* */
+// AsymmetricTukey
+/* ************************************************************************* */
+
+AsymmetricTukey::AsymmetricTukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
+ if (c <= 0) {
+ throw runtime_error("mEstimator AsymmetricTukey takes only positive double in constructor.");
+ }
+}
+
+double AsymmetricTukey::weight(double distance) const {
+ distance = -distance;
+ if (distance >= 0.0) {
+ return 1.0;
+ } else if (distance > -c_) {
+ const double one_minus_xc2 = 1.0 - distance * distance / csquared_;
+ return one_minus_xc2 * one_minus_xc2;
+ }
+ return 0.0;
+}
+
+double AsymmetricTukey::loss(double distance) const {
+ distance = -distance;
+ if (distance >= 0.0) {
+ return distance * distance / 2.0;
+ } else if (distance >= -c_) {
+ const double one_minus_xc2 = 1.0 - distance * distance / csquared_;
+ const double t = one_minus_xc2 * one_minus_xc2 * one_minus_xc2;
+ return csquared_ * (1 - t) / 6.0;
+ }
+ return csquared_ / 6.0;
+}
+
+void AsymmetricTukey::print(const std::string &s="") const {
+ std::cout << s << ": AsymmetricTukey (" << c_ << ")" << std::endl;
+}
+
+bool AsymmetricTukey::equals(const Base &expected, double tol) const {
+  const AsymmetricTukey* p = dynamic_cast<const AsymmetricTukey*>(&expected);
+ if (p == nullptr) return false;
+ return std::abs(c_ - p->c_) < tol;
+}
+
+AsymmetricTukey::shared_ptr AsymmetricTukey::Create(double c, const ReweightScheme reweight) {
+ return shared_ptr(new AsymmetricTukey(c, reweight));
+}
+
+
+/* ************************************************************************* */
+// AsymmetricCauchy
+/* ************************************************************************* */
+
+AsymmetricCauchy::AsymmetricCauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), ksquared_(k * k) {
+ if (k <= 0) {
+ throw runtime_error("mEstimator AsymmetricCauchy takes only positive double in constructor.");
+ }
+}
+
+double AsymmetricCauchy::weight(double distance) const {
+ distance = -distance;
+ if (distance >= 0.0) {
+ return 1.0;
+ }
+
+ return ksquared_ / (ksquared_ + distance*distance);
+
+}
+
+double AsymmetricCauchy::loss(double distance) const {
+ distance = -distance;
+ if (distance >= 0.0) {
+ return distance * distance / 2.0;
+ }
+ const double val = std::log1p(distance * distance / ksquared_);
+ return ksquared_ * val * 0.5;
+}
+
+void AsymmetricCauchy::print(const std::string &s="") const {
+ std::cout << s << ": AsymmetricCauchy (" << k_ << ")" << std::endl;
+}
+
+bool AsymmetricCauchy::equals(const Base &expected, double tol) const {
+  const AsymmetricCauchy* p = dynamic_cast<const AsymmetricCauchy*>(&expected);
+ if (p == nullptr) return false;
+ return std::abs(k_ - p->k_) < tol;
+}
+
+AsymmetricCauchy::shared_ptr AsymmetricCauchy::Create(double k, const ReweightScheme reweight) {
+ return shared_ptr(new AsymmetricCauchy(k, reweight));
+}
+
+
+/* ************************************************************************* */
+// Custom
+/* ************************************************************************* */
+
+Custom::Custom(std::function<double(double)> weight, std::function<double(double)> loss, const ReweightScheme reweight,
+               std::string name)
+    : Base(reweight), weight_(std::move(weight)), loss_(loss), name_(std::move(name)) {}
+
+double Custom::weight(double distance) const {
+ return weight_(distance);
+}
+
+double Custom::loss(double distance) const {
+ return loss_(distance);
+}
+
+void Custom::print(const std::string &s = "") const {
+ std::cout << s << ": Custom (" << name_ << ")" << std::endl;
+}
+
+bool Custom::equals(const Base &expected, double tol) const {
+  const auto *p = dynamic_cast<const Custom *>(&expected);
+ if (p == nullptr)
+ return false;
+ return name_ == p->name_ && weight_.target() == p->weight_.target() &&
+ loss_.target() == p->loss_.target() && reweight_ == p->reweight_;
+}
+
+Custom::shared_ptr Custom::Create(std::function<double(double)> weight, std::function<double(double)> loss,
+                                  const ReweightScheme reweight, const std::string &name) {
+  return std::make_shared<Custom>(std::move(weight), std::move(loss), reweight, name);
+}
+
} // namespace mEstimator
} // namespace noiseModel
} // gtsam
diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h
index c78bb512c..a8902e11b 100644
--- a/gtsam/linear/LossFunctions.h
+++ b/gtsam/linear/LossFunctions.h
@@ -15,6 +15,7 @@
* @date Jan 13, 2010
* @author Richard Roberts
* @author Frank Dellaert
+ * @author Fan Jiang
*/
#pragma once
@@ -470,6 +471,123 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
#endif
};
+/** Implementation of the "AsymmetricTukey" robust error model.
+ *
+ * This model has a scalar parameter "c".
+ *
+ * - Following are all for one side, the other is standard L2
+ * - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x| shared_ptr;
+
+ AsymmetricTukey(double c = 4.6851, const ReweightScheme reweight = Block);
+ double weight(double distance) const override;
+ double loss(double distance) const override;
+ void print(const std::string &s) const override;
+ bool equals(const Base &expected, double tol = 1e-8) const override;
+ static shared_ptr Create(double k, const ReweightScheme reweight = Block);
+ double modelParameter() const { return c_; }
+
+ private:
+#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
+ /** Serialization function */
+ friend class boost::serialization::access;
+  template <class ARCHIVE>
+ void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
+ ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
+ ar &BOOST_SERIALIZATION_NVP(c_);
+ }
+#endif
+};
+
+/** Implementation of the "AsymmetricCauchy" robust error model.
+ *
+ * This model has a scalar parameter "k".
+ *
+ * - Following are all for one side, the other is standard L2
+ * - Loss \rho(x) = 0.5 k² log(1+x²/k²)
+ * - Derivative \phi(x) = (k²x)/(x²+k²)
+ * - Weight w(x) = \phi(x)/x = k²/(x²+k²)
+ */
+class GTSAM_EXPORT AsymmetricCauchy : public Base {
+ protected:
+ double k_, ksquared_;
+
+ public:
+  typedef std::shared_ptr<AsymmetricCauchy> shared_ptr;
+
+ AsymmetricCauchy(double k = 0.1, const ReweightScheme reweight = Block);
+ double weight(double distance) const override;
+ double loss(double distance) const override;
+ void print(const std::string &s) const override;
+ bool equals(const Base &expected, double tol = 1e-8) const override;
+ static shared_ptr Create(double k, const ReweightScheme reweight = Block);
+ double modelParameter() const { return k_; }
+
+ private:
+#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
+ /** Serialization function */
+ friend class boost::serialization::access;
+  template <class ARCHIVE>
+ void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
+ ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
+ ar &BOOST_SERIALIZATION_NVP(k_);
+ ar &BOOST_SERIALIZATION_NVP(ksquared_);
+ }
+#endif
+};
+
+// Type alias for the custom loss and weight functions
+using CustomLossFunction = std::function<double(double)>;
+using CustomWeightFunction = std::function<double(double)>;
+
+/** Implementation of the "Custom" robust error model.
+ *
+ * This model just takes two functions as input, one for the loss and one for the weight.
+ */
+class GTSAM_EXPORT Custom : public Base {
+ protected:
+  std::function<double(double)> weight_, loss_;
+ std::string name_;
+
+ public:
+  typedef std::shared_ptr<Custom> shared_ptr;
+
+ Custom(CustomWeightFunction weight, CustomLossFunction loss,
+ const ReweightScheme reweight = Block, std::string name = "Custom");
+ double weight(double distance) const override;
+ double loss(double distance) const override;
+ void print(const std::string &s) const override;
+ bool equals(const Base &expected, double tol = 1e-8) const override;
+  static shared_ptr Create(std::function<double(double)> weight, std::function<double(double)> loss,
+                           const ReweightScheme reweight = Block, const std::string &name = "Custom");
+ inline std::string& name() { return name_; }
+
+  inline std::function<double(double)>& weightFunction() { return weight_; }
+  inline std::function<double(double)>& lossFunction() { return loss_; }
+
+ // Default constructor for serialization
+ inline Custom() = default;
+
+ private:
+#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
+ /** Serialization function */
+ friend class boost::serialization::access;
+  template <class ARCHIVE>
+ void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
+ ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
+ ar &BOOST_SERIALIZATION_NVP(name_);
+ }
+#endif
+};
+
} // namespace mEstimator
} // namespace noiseModel
} // namespace gtsam
diff --git a/gtsam/linear/SubgraphBuilder.h b/gtsam/linear/SubgraphBuilder.h
index aafba9306..f9ddd4c9a 100644
--- a/gtsam/linear/SubgraphBuilder.h
+++ b/gtsam/linear/SubgraphBuilder.h
@@ -182,7 +182,7 @@ GaussianFactorGraph buildFactorSubgraph(const GaussianFactorGraph &gfg,
/** Split the graph into a subgraph and the remaining edges.
* Note that the remaining factorgraph has null factors. */
-std::pair<GaussianFactorGraph, GaussianFactorGraph> splitFactorGraph(
+std::pair<GaussianFactorGraph, GaussianFactorGraph> GTSAM_EXPORT splitFactorGraph(
const GaussianFactorGraph &factorGraph, const Subgraph &subgraph);
} // namespace gtsam
diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i
index cd5000d9f..d4a045f09 100644
--- a/gtsam/linear/linear.i
+++ b/gtsam/linear/linear.i
@@ -89,6 +89,7 @@ virtual class Unit : gtsam::noiseModel::Isotropic {
namespace mEstimator {
virtual class Base {
+ enum ReweightScheme { Scalar, Block };
void print(string s = "") const;
};
@@ -191,6 +192,36 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
double loss(double error) const;
};
+virtual class AsymmetricTukey: gtsam::noiseModel::mEstimator::Base {
+ AsymmetricTukey(double k, gtsam::noiseModel::mEstimator::Base::ReweightScheme reweight);
+ static gtsam::noiseModel::mEstimator::AsymmetricTukey* Create(double k);
+
+ // enabling serialization functionality
+ void serializable() const;
+
+ double weight(double error) const;
+ double loss(double error) const;
+};
+
+virtual class Custom: gtsam::noiseModel::mEstimator::Base {
+ Custom(gtsam::noiseModel::mEstimator::CustomWeightFunction weight,
+ gtsam::noiseModel::mEstimator::CustomLossFunction loss,
+ gtsam::noiseModel::mEstimator::Base::ReweightScheme reweight,
+ std::string name);
+ static gtsam::noiseModel::mEstimator::Custom* Create(
+ gtsam::noiseModel::mEstimator::CustomWeightFunction weight,
+ gtsam::noiseModel::mEstimator::CustomLossFunction loss,
+ gtsam::noiseModel::mEstimator::Base::ReweightScheme reweight,
+ std::string name);
+
+ // enabling serialization functionality
+ void serializable() const;
+
+ double weight(double error) const;
+ double loss(double error) const;
+};
+
+
}///\namespace mEstimator
virtual class Robust : gtsam::noiseModel::Base {
diff --git a/gtsam/linear/tests/testGaussianFactorGraph.cpp b/gtsam/linear/tests/testGaussianFactorGraph.cpp
index 2ba00abc1..03222bb3f 100644
--- a/gtsam/linear/tests/testGaussianFactorGraph.cpp
+++ b/gtsam/linear/tests/testGaussianFactorGraph.cpp
@@ -391,8 +391,7 @@ TEST(GaussianFactorGraph, clone) {
EXPECT(assert_equal(init_graph, actCloned)); // Same as the original version
// Apply an in-place change to init_graph and compare
- JacobianFactor::shared_ptr jacFactor0 =
- std::dynamic_pointer_cast<JacobianFactor>(init_graph.at(0));
+ JacobianFactor::shared_ptr jacFactor0 = init_graph.at<JacobianFactor>(0);
CHECK(jacFactor0);
jacFactor0->getA(jacFactor0->begin()) *= 7.;
EXPECT(assert_inequal(init_graph, exp_graph));
diff --git a/gtsam/linear/tests/testJacobianFactor.cpp b/gtsam/linear/tests/testJacobianFactor.cpp
index 0ad77b366..234466620 100644
--- a/gtsam/linear/tests/testJacobianFactor.cpp
+++ b/gtsam/linear/tests/testJacobianFactor.cpp
@@ -65,7 +65,10 @@ TEST(JacobianFactor, constructors_and_accessors)
JacobianFactor actual(terms[0].first, terms[0].second, b, noise);
EXPECT(assert_equal(expected, actual));
LONGS_EQUAL((long)terms[0].first, (long)actual.keys().back());
+ // Key iterator
EXPECT(assert_equal(terms[0].second, actual.getA(actual.end() - 1)));
+ // Key
+ EXPECT(assert_equal(terms[0].second, actual.getA(terms[0].first)));
EXPECT(assert_equal(b, expected.getb()));
EXPECT(assert_equal(b, actual.getb()));
EXPECT(noise == expected.get_model());
@@ -78,7 +81,10 @@ TEST(JacobianFactor, constructors_and_accessors)
terms[1].first, terms[1].second, b, noise);
EXPECT(assert_equal(expected, actual));
LONGS_EQUAL((long)terms[1].first, (long)actual.keys().back());
+ // Key iterator
EXPECT(assert_equal(terms[1].second, actual.getA(actual.end() - 1)));
+ // Key
+ EXPECT(assert_equal(terms[1].second, actual.getA(terms[1].first)));
EXPECT(assert_equal(b, expected.getb()));
EXPECT(assert_equal(b, actual.getb()));
EXPECT(noise == expected.get_model());
@@ -91,7 +97,10 @@ TEST(JacobianFactor, constructors_and_accessors)
terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
EXPECT(assert_equal(expected, actual));
LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back());
+ // Key iterator
EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1)));
+ // Key
+ EXPECT(assert_equal(terms[2].second, actual.getA(terms[2].first)));
EXPECT(assert_equal(b, expected.getb()));
EXPECT(assert_equal(b, actual.getb()));
EXPECT(noise == expected.get_model());
diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp
index f1830f37f..725680501 100644
--- a/gtsam/linear/tests/testNoiseModel.cpp
+++ b/gtsam/linear/tests/testNoiseModel.cpp
@@ -14,6 +14,7 @@
* @date Jan 13, 2010
* @author Richard Roberts
* @author Frank Dellaert
+ * @author Fan Jiang
*/
@@ -504,6 +505,22 @@ TEST(NoiseModel, robustFunctionCauchy)
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8);
}
+TEST(NoiseModel, robustFunctionAsymmetricCauchy)
+{
+ const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
+ const mEstimator::AsymmetricCauchy::shared_ptr cauchy = mEstimator::AsymmetricCauchy::Create(k);
+ DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error1), 1e-8);
+ DOUBLES_EQUAL(0.2000, cauchy->weight(error2), 1e-8);
+ // Test negative value to ensure we take absolute value of error.
+ DOUBLES_EQUAL(1.0, cauchy->weight(error3), 1e-8);
+ DOUBLES_EQUAL(1.0, cauchy->weight(error4), 1e-8);
+
+ DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error1), 1e-8);
+ DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error2), 1e-8);
+ DOUBLES_EQUAL(50.0, cauchy->loss(error3), 1e-8);
+ DOUBLES_EQUAL(0.5, cauchy->loss(error4), 1e-8);
+}
+
TEST(NoiseModel, robustFunctionGemanMcClure)
{
const double k = 1.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
@@ -551,6 +568,22 @@ TEST(NoiseModel, robustFunctionTukey)
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8);
}
+TEST(NoiseModel, robustFunctionAsymmetricTukey)
+{
+ const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
+ const mEstimator::AsymmetricTukey::shared_ptr tukey = mEstimator::AsymmetricTukey::Create(k);
+ DOUBLES_EQUAL(0.9216, tukey->weight(error1), 1e-8);
+ DOUBLES_EQUAL(0.0, tukey->weight(error2), 1e-8);
+ // Test negative value to ensure we take absolute value of error.
+ DOUBLES_EQUAL(1.0, tukey->weight(error3), 1e-8);
+ DOUBLES_EQUAL(1.0, tukey->weight(error4), 1e-8);
+
+ DOUBLES_EQUAL(0.480266666666667, tukey->loss(error1), 1e-8);
+ DOUBLES_EQUAL(4.166666666666667, tukey->loss(error2), 1e-8);
+ DOUBLES_EQUAL(50.0, tukey->loss(error3), 1e-8);
+ DOUBLES_EQUAL(0.5, tukey->loss(error4), 1e-8);
+}
+
TEST(NoiseModel, robustFunctionDCS)
{
const double k = 1.0, error1 = 1.0, error2 = 10.0;
@@ -670,6 +703,35 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
}
}
+/* ************************************************************************* */
+TEST(NoiseModel, robustNoiseCustomHuber) {
+ const double k = 10.0, error1 = 1.0, error2 = 100.0;
+ Matrix A = (Matrix(2, 2) << 1.0, 10.0, 100.0, 1000.0).finished();
+ Vector b = Vector2(error1, error2);
+ const Robust::shared_ptr robust =
+ Robust::Create(mEstimator::Custom::Create(
+ [k](double e) {
+ const auto abs_e = std::abs(e);
+ return abs_e <= k ? 1.0 : k / abs_e;
+ },
+ [k](double e) {
+ const auto abs_e = std::abs(e);
+ return abs_e <= k ? abs_e * abs_e / 2.0 : k * abs_e - k * k / 2.0;
+ },
+ noiseModel::mEstimator::Custom::Scalar, "Huber"),
+ Unit::Create(2));
+
+ robust->WhitenSystem(A, b);
+
+ DOUBLES_EQUAL(error1, b(0), 1e-8);
+ DOUBLES_EQUAL(sqrt(k * error2), b(1), 1e-8);
+
+ DOUBLES_EQUAL(1.0, A(0, 0), 1e-8);
+ DOUBLES_EQUAL(10.0, A(0, 1), 1e-8);
+ DOUBLES_EQUAL(sqrt(k * 100.0), A(1, 0), 1e-8);
+ DOUBLES_EQUAL(sqrt(k / 100.0) * 1000.0, A(1, 1), 1e-8);
+}
+
TEST(NoiseModel, lossFunctionAtZero)
{
const double k = 5.0;
@@ -697,6 +759,12 @@ TEST(NoiseModel, lossFunctionAtZero)
auto lsdz = mEstimator::L2WithDeadZone::Create(k);
DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8);
DOUBLES_EQUAL(lsdz->weight(0), 0, 1e-8);
+ auto assy_cauchy = mEstimator::AsymmetricCauchy::Create(k);
+ DOUBLES_EQUAL(assy_cauchy->loss(0), 0, 1e-8);
+ DOUBLES_EQUAL(assy_cauchy->weight(0), 1, 1e-8);
+ auto assy_tukey = mEstimator::AsymmetricTukey::Create(k);
+ DOUBLES_EQUAL(assy_tukey->loss(0), 0, 1e-8);
+ DOUBLES_EQUAL(assy_tukey->weight(0), 1, 1e-8);
}
diff --git a/gtsam/nonlinear/GncOptimizer.h b/gtsam/nonlinear/GncOptimizer.h
index d59eb4371..b646d009e 100644
--- a/gtsam/nonlinear/GncOptimizer.h
+++ b/gtsam/nonlinear/GncOptimizer.h
@@ -65,10 +65,9 @@ class GncOptimizer {
nfg_.resize(graph.size());
for (size_t i = 0; i < graph.size(); i++) {
if (graph[i]) {
- NoiseModelFactor::shared_ptr factor = std::dynamic_pointer_cast<
- NoiseModelFactor>(graph[i]);
- auto robust = std::dynamic_pointer_cast<
- noiseModel::Robust>(factor->noiseModel());
+ NoiseModelFactor::shared_ptr factor = graph.at<NoiseModelFactor>(i);
+ auto robust =
+ std::dynamic_pointer_cast<noiseModel::Robust>(factor->noiseModel());
// if the factor has a robust loss, we remove the robust loss
nfg_[i] = robust ? factor-> cloneWithNewNoiseModel(robust->noise()) : factor;
}
@@ -401,11 +400,9 @@ class GncOptimizer {
newGraph.resize(nfg_.size());
for (size_t i = 0; i < nfg_.size(); i++) {
if (nfg_[i]) {
- auto factor = std::dynamic_pointer_cast<
- NoiseModelFactor>(nfg_[i]);
- auto noiseModel =
- std::dynamic_pointer_cast(
- factor->noiseModel());
+ auto factor = nfg_.at<NoiseModelFactor>(i);
+ auto noiseModel = std::dynamic_pointer_cast<noiseModel::Gaussian>(
+ factor->noiseModel());
if (noiseModel) {
Matrix newInfo = weights[i] * noiseModel->information();
auto newNoiseModel = noiseModel::Gaussian::Information(newInfo);
diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp
index f4c61e659..0b5449b58 100644
--- a/gtsam/nonlinear/ISAM2.cpp
+++ b/gtsam/nonlinear/ISAM2.cpp
@@ -556,9 +556,12 @@ void ISAM2::marginalizeLeaves(
// We do not need the marginal factors associated with this clique
// because their information is already incorporated in the new
// marginal factor. So, now associate this marginal factor with the
- // parent of this clique.
- marginalFactors[clique->parent()->conditional()->front()].push_back(
- marginalFactor);
+ // parent of this clique. If the clique is a root and has no parent, then
+ // we can discard it without keeping track of the marginal factor.
+ if (clique->parent()) {
+ marginalFactors[clique->parent()->conditional()->front()].push_back(
+ marginalFactor);
+ }
// Now remove this clique and its subtree - all of its marginal
// information has been stored in marginalFactors.
trackingRemoveSubtree(clique);
@@ -636,7 +639,7 @@ void ISAM2::marginalizeLeaves(
// Make the clique's matrix appear as a subset
const DenseIndex dimToRemove = cg->matrixObject().offset(nToRemove);
- cg->matrixObject().firstBlock() = nToRemove;
+ cg->matrixObject().firstBlock() += nToRemove;
cg->matrixObject().rowStart() = dimToRemove;
// Change the keys in the clique
@@ -662,42 +665,55 @@ void ISAM2::marginalizeLeaves(
// At this point we have updated the BayesTree, now update the remaining iSAM2
// data structures
+ // Remove the factors to remove that will be summarized in marginal factors
+ NonlinearFactorGraph removedFactors;
+ for (const auto index : factorIndicesToRemove) {
+ removedFactors.push_back(nonlinearFactors_[index]);
+ nonlinearFactors_.remove(index);
+ if (params_.cacheLinearizedFactors) {
+ linearFactors_.remove(index);
+ }
+ }
+ variableIndex_.remove(factorIndicesToRemove.begin(),
+ factorIndicesToRemove.end(), removedFactors);
+
// Gather factors to add - the new marginal factors
- GaussianFactorGraph factorsToAdd;
+ GaussianFactorGraph factorsToAdd{};
+ NonlinearFactorGraph nonlinearFactorsToAdd{};
for (const auto& key_factors : marginalFactors) {
for (const auto& factor : key_factors.second) {
if (factor) {
factorsToAdd.push_back(factor);
- if (marginalFactorsIndices)
- marginalFactorsIndices->push_back(nonlinearFactors_.size());
- nonlinearFactors_.push_back(
- std::make_shared<LinearContainerFactor>(factor));
- if (params_.cacheLinearizedFactors) linearFactors_.push_back(factor);
+ nonlinearFactorsToAdd.emplace_shared<LinearContainerFactor>(factor);
for (Key factorKey : *factor) {
fixedVariables_.insert(factorKey);
}
}
}
}
- variableIndex_.augment(factorsToAdd); // Augment the variable index
-
- // Remove the factors to remove that have been summarized in the newly-added
- // marginal factors
- NonlinearFactorGraph removedFactors;
- for (const auto index : factorIndicesToRemove) {
- removedFactors.push_back(nonlinearFactors_[index]);
- nonlinearFactors_.remove(index);
- if (params_.cacheLinearizedFactors) linearFactors_.remove(index);
+ // Add the nonlinear factors and keep track of the new factor indices
+ auto newFactorIndices = nonlinearFactors_.add_factors(nonlinearFactorsToAdd,
+ params_.findUnusedFactorSlots);
+ // Add cached linear factors.
+ if (params_.cacheLinearizedFactors){
+ linearFactors_.resize(nonlinearFactors_.size());
+ for (std::size_t i = 0; i < nonlinearFactorsToAdd.size(); ++i){
+ linearFactors_[newFactorIndices[i]] = factorsToAdd[i];
+ }
}
- variableIndex_.remove(factorIndicesToRemove.begin(),
- factorIndicesToRemove.end(), removedFactors);
-
- if (deletedFactorsIndices)
- deletedFactorsIndices->assign(factorIndicesToRemove.begin(),
- factorIndicesToRemove.end());
+ // Augment the variable index
+ variableIndex_.augment(factorsToAdd, newFactorIndices);
// Remove the marginalized variables
removeVariables(KeySet(leafKeys.begin(), leafKeys.end()));
+
+ if (deletedFactorsIndices) {
+ deletedFactorsIndices->assign(factorIndicesToRemove.begin(),
+ factorIndicesToRemove.end());
+ }
+ if (marginalFactorsIndices){
+ *marginalFactorsIndices = std::move(newFactorIndices);
+ }
}
/* ************************************************************************* */
diff --git a/gtsam/nonlinear/ISAM2.h b/gtsam/nonlinear/ISAM2.h
index 50c5058c2..d3abd95fd 100644
--- a/gtsam/nonlinear/ISAM2.h
+++ b/gtsam/nonlinear/ISAM2.h
@@ -345,7 +345,7 @@ class GTSAM_EXPORT ISAM2 : public BayesTree {
friend class boost::serialization::access;
 template<class ARCHIVE>
 void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
- ar & boost::serialization::base_object<BayesTree<ISAM2Clique> >(*this);
+ ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(theta_);
ar & BOOST_SERIALIZATION_NVP(variableIndex_);
ar & BOOST_SERIALIZATION_NVP(delta_);
diff --git a/gtsam/nonlinear/NonlinearFactor.h b/gtsam/nonlinear/NonlinearFactor.h
index 57c4a00eb..725117748 100644
--- a/gtsam/nonlinear/NonlinearFactor.h
+++ b/gtsam/nonlinear/NonlinearFactor.h
@@ -46,7 +46,7 @@ namespace gtsam {
* Had to use the static_cast of a nullptr, because the compiler is not able to
* deduce the type of the nullptr when expanding the evaluateError templates.
*/
-#define OptionalNone static_cast(nullptr)
+#define OptionalNone static_cast(nullptr)
/** This typedef will be used everywhere boost::optional reference was used
* previously. This is used to indicate that the Jacobian is optional. In the future
diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i
index 06451ab1f..3f5fb1dd5 100644
--- a/gtsam/nonlinear/nonlinear.i
+++ b/gtsam/nonlinear/nonlinear.i
@@ -86,7 +86,10 @@ class NonlinearFactorGraph {
const gtsam::noiseModel::Base* noiseModel);
// NonlinearFactorGraph
- void printErrors(const gtsam::Values& values) const;
+ void printErrors(const gtsam::Values& values,
+ const string& str = "NonlinearFactorGraph: ",
+ const gtsam::KeyFormatter& keyFormatter =
+ gtsam::DefaultKeyFormatter) const;
double error(const gtsam::Values& values) const;
double probPrime(const gtsam::Values& values) const;
gtsam::Ordering orderingCOLAMD() const;
diff --git a/gtsam/nonlinear/tests/testSerializationNonlinear.cpp b/gtsam/nonlinear/tests/testSerializationNonlinear.cpp
index f402656c1..d6f693a23 100644
--- a/gtsam/nonlinear/tests/testSerializationNonlinear.cpp
+++ b/gtsam/nonlinear/tests/testSerializationNonlinear.cpp
@@ -183,7 +183,7 @@ TEST(Serialization, ISAM2) {
std::string binaryPath = "saved_solver.dat";
try {
- std::ofstream outputStream(binaryPath);
+ std::ofstream outputStream(binaryPath, std::ios::out | std::ios::binary);
boost::archive::binary_oarchive outputArchive(outputStream);
outputArchive << solver;
} catch(...) {
@@ -192,7 +192,7 @@ TEST(Serialization, ISAM2) {
gtsam::ISAM2 solverFromDisk;
try {
- std::ifstream ifs(binaryPath);
+ std::ifstream ifs(binaryPath, std::ios::in | std::ios::binary);
boost::archive::binary_iarchive inputArchive(ifs);
inputArchive >> solverFromDisk;
} catch(...) {
diff --git a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp
index 2a7bede30..a2b265a56 100644
--- a/gtsam/nonlinear/tests/testValues.cpp
+++ b/gtsam/nonlinear/tests/testValues.cpp
@@ -581,7 +581,7 @@ TEST(Values, std_move) {
TEST(Values, VectorDynamicInsertFixedRead) {
Values values;
Vector v(3); v << 5.0, 6.0, 7.0;
- values.insert(key1, v);
+ values.insert(key1, v);
Vector3 expected(5.0, 6.0, 7.0);
Vector3 actual = values.at(key1);
CHECK(assert_equal(expected, actual));
@@ -629,7 +629,7 @@ TEST(Values, VectorFixedInsertFixedRead) {
TEST(Values, MatrixDynamicInsertFixedRead) {
Values values;
Matrix v(1,3); v << 5.0, 6.0, 7.0;
- values.insert(key1, v);
+ values.insert(key1, v);
Vector3 expected(5.0, 6.0, 7.0);
CHECK(assert_equal((Vector)expected, values.at(key1)));
CHECK_EXCEPTION(values.at(key1), exception);
@@ -639,7 +639,15 @@ TEST(Values, Demangle) {
Values values;
Matrix13 v; v << 5.0, 6.0, 7.0;
values.insert(key1, v);
- string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix)\n[\n 5, 6, 7\n]\n\n";
+#ifdef __GNUG__
+ string expected =
+ "Values with 1 values:\nValue v1: (Eigen::Matrix)\n[\n 5, 6, 7\n]\n\n";
+#elif _WIN32
+ string expected =
+ "Values with 1 values:\nValue v1: "
+ "(class Eigen::Matrix)\n[\n 5, 6, 7\n]\n\n";
+#endif
EXPECT(assert_print_equal(expected, values));
}
diff --git a/gtsam/nonlinear/values.i b/gtsam/nonlinear/values.i
index e36b1cf1c..1b0322699 100644
--- a/gtsam/nonlinear/values.i
+++ b/gtsam/nonlinear/values.i
@@ -59,10 +59,12 @@ class Values {
// enabling serialization functionality
void serialize() const;
- // New in 4.0, we have to specialize every insert/update/at to generate
- // wrappers Instead of the old: void insert(size_t j, const gtsam::Value&
- // value); void update(size_t j, const gtsam::Value& val); gtsam::Value
- // at(size_t j) const;
+ // New in 4.0, we have to specialize every insert/update/at
+ // to generate wrappers.
+ // Instead of the old:
+ // void insert(size_t j, const gtsam::Value& value);
+ // void update(size_t j, const gtsam::Value& val);
+ // gtsam::Value at(size_t j) const;
// The order is important: Vector has to precede Point2/Point3 so `atVector`
// can work for those fixed-size vectors.
@@ -99,6 +101,10 @@ class Values {
template
void insert(size_t j, const T& val);
+ // The order is important: Vector has to precede Point2/Point3 so `atVector`
+ // can work for those fixed-size vectors.
+ void update(size_t j, Vector vector);
+ void update(size_t j, Matrix matrix);
void update(size_t j, const gtsam::Point2& point2);
void update(size_t j, const gtsam::Point3& point3);
void update(size_t j, const gtsam::Rot2& rot2);
@@ -125,10 +131,12 @@ class Values {
void update(size_t j, const gtsam::PinholePose& camera);
void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
void update(size_t j, const gtsam::NavState& nav_state);
- void update(size_t j, Vector vector);
- void update(size_t j, Matrix matrix);
void update(size_t j, double c);
+ // The order is important: Vector has to precede Point2/Point3 so `atVector`
+ // can work for those fixed-size vectors.
+ void insert_or_assign(size_t j, Vector vector);
+ void insert_or_assign(size_t j, Matrix matrix);
void insert_or_assign(size_t j, const gtsam::Point2& point2);
void insert_or_assign(size_t j, const gtsam::Point3& point3);
void insert_or_assign(size_t j, const gtsam::Rot2& rot2);
@@ -165,8 +173,6 @@ class Values {
void insert_or_assign(size_t j,
const gtsam::imuBias::ConstantBias& constant_bias);
void insert_or_assign(size_t j, const gtsam::NavState& nav_state);
- void insert_or_assign(size_t j, Vector vector);
- void insert_or_assign(size_t j, Matrix matrix);
void insert_or_assign(size_t j, double c);
template tracksFromPairwiseMatches(
}
// TODO(johnwlambert): return the Transitivity failure percentage here.
- return tracks2d;
+ return validTracks;
}
} // namespace gtsfm
diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h
index 44a5ef43e..4848d7cfa 100644
--- a/gtsam/sfm/TranslationRecovery.h
+++ b/gtsam/sfm/TranslationRecovery.h
@@ -48,7 +48,7 @@ namespace gtsam {
// where s is an arbitrary scale that can be supplied, default 1.0. Hence, two
// versions are supplied below corresponding to whether we have initial values
// or not.
-class TranslationRecovery {
+class GTSAM_EXPORT TranslationRecovery {
public:
 using KeyPair = std::pair<Key, Key>;
 using TranslationEdges = std::vector<BinaryMeasurement<Unit3>>;
diff --git a/gtsam/sfm/tests/testShonanAveraging.cpp b/gtsam/sfm/tests/testShonanAveraging.cpp
index dd4759daa..dfa725ab6 100644
--- a/gtsam/sfm/tests/testShonanAveraging.cpp
+++ b/gtsam/sfm/tests/testShonanAveraging.cpp
@@ -372,9 +372,11 @@ TEST(ShonanAveraging2, noisyToyGraphWithHuber) {
// test that each factor is actually robust
for (size_t i=0; i<=4; i++) { // note: last is the Gauge factor and is not robust
- const auto &robust = std::dynamic_pointer_cast(
- std::dynamic_pointer_cast(graph[i])->noiseModel());
- EXPECT(robust); // we expect the factors to be use a robust noise model (in particular, Huber)
+ const auto &robust = std::dynamic_pointer_cast<noiseModel::Robust>(
+ graph.at<NoiseModelFactor>(i)->noiseModel());
+ // we expect the factors to use a robust noise model
+ // (in particular, Huber)
+ EXPECT(robust);
}
// test result
diff --git a/gtsam/slam/SmartFactorBase.h b/gtsam/slam/SmartFactorBase.h
index e0540cc41..5d6ec4445 100644
--- a/gtsam/slam/SmartFactorBase.h
+++ b/gtsam/slam/SmartFactorBase.h
@@ -162,8 +162,9 @@ protected:
/// Collect all cameras: important that in key order.
virtual Cameras cameras(const Values& values) const {
Cameras cameras;
- for(const Key& k: this->keys_)
+ for(const Key& k: this->keys_) {
 cameras.push_back(values.at<CAMERA>(k));
+ }
return cameras;
}
diff --git a/gtsam/slam/expressions.h b/gtsam/slam/expressions.h
index d1bfab7f2..8d34ce681 100644
--- a/gtsam/slam/expressions.h
+++ b/gtsam/slam/expressions.h
@@ -155,10 +155,10 @@ Point2_ project2(const Expression& camera_, const Expression& p_)
namespace internal {
// Helper template for project3 expression below
template
-inline Point2 project6(const Pose3& x, const Point3& p, const Cal3_S2& K,
+inline Point2 project6(const Pose3& x, const POINT& p, const CALIBRATION& K,
OptionalJacobian<2, 6> Dpose, OptionalJacobian<2, 3> Dpoint,
- OptionalJacobian<2, 5> Dcal) {
- return PinholeCamera(x, K).project(p, Dpose, Dpoint, Dcal);
+ OptionalJacobian<2, CALIBRATION::dimension> Dcal) {
+ return PinholeCamera(x, K).project(p, Dpose, Dpoint, Dcal);
}
}
diff --git a/gtsam/slam/slam.i b/gtsam/slam/slam.i
index d35a87eee..7135137bb 100644
--- a/gtsam/slam/slam.i
+++ b/gtsam/slam/slam.i
@@ -156,6 +156,9 @@ virtual class SmartProjectionPoseFactor : gtsam::NonlinearFactor {
// enabling serialization functionality
void serialize() const;
+
+ gtsam::TriangulationResult point() const;
+ gtsam::TriangulationResult point(const gtsam::Values& values) const;
};
typedef gtsam::SmartProjectionPoseFactor
diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp
index 8591a3932..13104174f 100644
--- a/gtsam/slam/tests/testDataset.cpp
+++ b/gtsam/slam/tests/testDataset.cpp
@@ -97,8 +97,7 @@ TEST(dataSet, load2D) {
auto model = noiseModel::Unit::Create(3);
BetweenFactor expected(1, 0, Pose2(-0.99879, 0.0417574, -0.00818381),
model);
- BetweenFactor<Pose2>::shared_ptr actual =
- std::dynamic_pointer_cast<BetweenFactor<Pose2>>(graph->at(0));
+ BetweenFactor<Pose2>::shared_ptr actual = graph->at<BetweenFactor<Pose2>>(0);
EXPECT(assert_equal(expected, *actual));
// Check binary measurements, Pose2
@@ -113,9 +112,8 @@ TEST(dataSet, load2D) {
// // Check factor parsing
const auto actualFactors = parseFactors(filename);
for (size_t i : {0, 1, 2, 3, 4, 5}) {
- EXPECT(assert_equal(
- *std::dynamic_pointer_cast<BetweenFactor<Pose2>>(graph->at(i)),
- *actualFactors[i], 1e-5));
+ EXPECT(assert_equal(*graph->at<BetweenFactor<Pose2>>(i), *actualFactors[i],
+ 1e-5));
}
// Check pose parsing
@@ -194,9 +192,8 @@ TEST(dataSet, readG2o3D) {
// Check factor parsing
const auto actualFactors = parseFactors(g2oFile);
for (size_t i : {0, 1, 2, 3, 4, 5}) {
- EXPECT(assert_equal(
- *std::dynamic_pointer_cast<BetweenFactor<Pose3>>(expectedGraph[i]),
- *actualFactors[i], 1e-5));
+ EXPECT(assert_equal(*expectedGraph.at<BetweenFactor<Pose3>>(i),
+ *actualFactors[i], 1e-5));
}
// Check pose parsing
diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt
index 30f096772..1f7b4a7c7 100644
--- a/gtsam_unstable/CMakeLists.txt
+++ b/gtsam_unstable/CMakeLists.txt
@@ -82,7 +82,7 @@ if(GTSAM_SUPPORT_NESTED_DISSECTION) # Only build partition if metis is built
endif(GTSAM_SUPPORT_NESTED_DISSECTION)
# Versions - same as core gtsam library
-set(gtsam_unstable_version ${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH})
+set(gtsam_unstable_version ${GTSAM_VERSION_STRING})
set(gtsam_unstable_soversion ${GTSAM_VERSION_MAJOR})
message(STATUS "GTSAM_UNSTABLE Version: ${gtsam_unstable_version}")
message(STATUS "Install prefix: ${CMAKE_INSTALL_PREFIX}")
diff --git a/gtsam_unstable/discrete/examples/CMakeLists.txt b/gtsam_unstable/discrete/examples/CMakeLists.txt
index ba4e278c9..16e057cfe 100644
--- a/gtsam_unstable/discrete/examples/CMakeLists.txt
+++ b/gtsam_unstable/discrete/examples/CMakeLists.txt
@@ -12,4 +12,4 @@ endif()
-gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable")
+gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
diff --git a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp
index 7c769fd78..39d9d743b 100644
--- a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp
+++ b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp
@@ -183,13 +183,10 @@ class LoopyBelief {
// accumulate unary factors
if (graph.at(factorIndex)->size() == 1) {
if (!prodOfUnaries)
- prodOfUnaries = std::dynamic_pointer_cast<DecisionTreeFactor>(
- graph.at(factorIndex));
+ prodOfUnaries = graph.at<DecisionTreeFactor>(factorIndex);
 else
 prodOfUnaries = std::make_shared<DecisionTreeFactor>(
- *prodOfUnaries *
- (*std::dynamic_pointer_cast<DecisionTreeFactor>(
- graph.at(factorIndex))));
+ *prodOfUnaries * (*graph.at<DecisionTreeFactor>(factorIndex)));
}
}
diff --git a/gtsam_unstable/examples/CMakeLists.txt b/gtsam_unstable/examples/CMakeLists.txt
index 967937b22..120f293e7 100644
--- a/gtsam_unstable/examples/CMakeLists.txt
+++ b/gtsam_unstable/examples/CMakeLists.txt
@@ -9,4 +9,4 @@ if (NOT GTSAM_USE_BOOST_FEATURES)
endif()
-gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable")
+gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
diff --git a/gtsam_unstable/geometry/Event.h b/gtsam_unstable/geometry/Event.h
index a4055d038..a3c907646 100644
--- a/gtsam_unstable/geometry/Event.h
+++ b/gtsam_unstable/geometry/Event.h
@@ -34,7 +34,7 @@ namespace gtsam {
* SLAM, where we have "time of arrival" measurements at a set of sensors. The
* TOA functor below provides a measurement function for those applications.
*/
-class Event {
+class GTSAM_UNSTABLE_EXPORT Event {
double time_; ///< Time event was generated
Point3 location_; ///< Location at time event was generated
@@ -62,10 +62,10 @@ class Event {
}
/** print with optional string */
- GTSAM_UNSTABLE_EXPORT void print(const std::string& s = "") const;
+ void print(const std::string& s = "") const;
/** equals with an tolerance */
- GTSAM_UNSTABLE_EXPORT bool equals(const Event& other,
+ bool equals(const Event& other,
double tol = 1e-9) const;
/// Updates a with tangent space delta
diff --git a/gtsam_unstable/linear/LP.h b/gtsam_unstable/linear/LP.h
index dbb744d3e..53247be4c 100644
--- a/gtsam_unstable/linear/LP.h
+++ b/gtsam_unstable/linear/LP.h
@@ -29,7 +29,7 @@ namespace gtsam {
using namespace std;
/// Mapping between variable's key and its corresponding dimensionality
-using KeyDimMap = std::map;
+using KeyDimMap = std::map;
/*
* Iterates through every factor in a linear graph and generates a
* mapping between every factor key and it's corresponding dimensionality.
diff --git a/gtsam_unstable/linear/LPInitSolver.cpp b/gtsam_unstable/linear/LPInitSolver.cpp
index 9f12d670e..3d24f784b 100644
--- a/gtsam_unstable/linear/LPInitSolver.cpp
+++ b/gtsam_unstable/linear/LPInitSolver.cpp
@@ -65,9 +65,7 @@ GaussianFactorGraph::shared_ptr LPInitSolver::buildInitOfInitGraph() const {
new GaussianFactorGraph(lp_.equalities));
// create factor ||x||^2 and add to the graph
- const KeyDimMap& constrainedKeyDim = lp_.constrainedKeyDimMap();
- for (const auto& [key, _] : constrainedKeyDim) {
- size_t dim = constrainedKeyDim.at(key);
+ for (const auto& [key, dim] : lp_.constrainedKeyDimMap()) {
initGraph->push_back(
JacobianFactor(key, Matrix::Identity(dim, dim), Vector::Zero(dim)));
}
diff --git a/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp b/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp
index 38cfd7348..bbb461abb 100644
--- a/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp
+++ b/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp
@@ -87,7 +87,7 @@ TEST(NonlinearClusterTree, Clusters) {
Ordering ordering;
ordering.push_back(x1);
const auto [bn, fg] = gfg->eliminatePartialSequential(ordering);
- auto expectedFactor = std::dynamic_pointer_cast<HessianFactor>(fg->at(0));
+ auto expectedFactor = fg->at<HessianFactor>(0);
if (!expectedFactor)
throw std::runtime_error("Expected HessianFactor");
diff --git a/gtsam_unstable/partition/FindSeparator-inl.h b/gtsam_unstable/partition/FindSeparator-inl.h
index e991c5af2..f4718f7a9 100644
--- a/gtsam_unstable/partition/FindSeparator-inl.h
+++ b/gtsam_unstable/partition/FindSeparator-inl.h
@@ -39,7 +39,7 @@ namespace gtsam { namespace partition {
* whether node j is in the left part of the graph, the right part, or the
* separator, respectively
*/
- std::pair separatorMetis(idx_t n, const sharedInts& xadj,
+ std::pair separatorMetis(idx_t n, const sharedInts& xadj,
const sharedInts& adjncy, const sharedInts& adjwgt, bool verbose) {
// control parameters
@@ -277,7 +277,7 @@ namespace gtsam { namespace partition {
//throw runtime_error("separatorPartitionByMetis:stop for debug");
}
- if(result.C.size() != sepsize) {
+ if(result.C.size() != size_t(sepsize)) {
std::cout << "total key: " << keys.size()
<< " result(A,B,C) = " << result.A.size() << ", " << result.B.size() << ", " << result.C.size()
<< "; sepsize from Metis = " << sepsize << std::endl;
diff --git a/gtsam_unstable/partition/GenericGraph.h b/gtsam_unstable/partition/GenericGraph.h
index bcfd77336..a7f2c1ea3 100644
--- a/gtsam_unstable/partition/GenericGraph.h
+++ b/gtsam_unstable/partition/GenericGraph.h
@@ -14,6 +14,7 @@
#include
#include
#include
+#include
#include "PartitionWorkSpace.h"
@@ -49,7 +50,7 @@ namespace gtsam { namespace partition {
typedef std::vector GenericGraph2D;
/** merge nodes in DSF using constraints captured by the given graph */
- std::list > findIslands(const GenericGraph2D& graph, const std::vector& keys, WorkSpace& workspace,
+ std::list > GTSAM_UNSTABLE_EXPORT findIslands(const GenericGraph2D& graph, const std::vector& keys, WorkSpace& workspace,
const int minNrConstraintsPerCamera, const int minNrConstraintsPerLandmark);
/** eliminate the sensors from generic graph */
@@ -97,11 +98,11 @@ namespace gtsam { namespace partition {
typedef std::vector GenericGraph3D;
/** merge nodes in DSF using constraints captured by the given graph */
- std::list > findIslands(const GenericGraph3D& graph, const std::vector& keys, WorkSpace& workspace,
+ std::list > GTSAM_UNSTABLE_EXPORT findIslands(const GenericGraph3D& graph, const std::vector& keys, WorkSpace& workspace,
const size_t minNrConstraintsPerCamera, const size_t minNrConstraintsPerLandmark);
/** eliminate the sensors from generic graph */
- void reduceGenericGraph(const GenericGraph3D& graph, const std::vector& cameraKeys, const std::vector& landmarkKeys,
+ void GTSAM_UNSTABLE_EXPORT reduceGenericGraph(const GenericGraph3D& graph, const std::vector& cameraKeys, const std::vector& landmarkKeys,
const std::vector& dictionary, GenericGraph3D& reducedGraph);
/** check whether the 3D graph is singular (under constrained) */
diff --git a/gtsam_unstable/partition/tests/CMakeLists.txt b/gtsam_unstable/partition/tests/CMakeLists.txt
index 0b918e497..4ca6b9186 100644
--- a/gtsam_unstable/partition/tests/CMakeLists.txt
+++ b/gtsam_unstable/partition/tests/CMakeLists.txt
@@ -1,6 +1,6 @@
set(ignore_test "testNestedDissection.cpp")
-if (NOT GTSAM_USE_BOOST_FEATURES)
+if (NOT GTSAM_USE_BOOST_FEATURES OR MSVC)
list(APPEND ignore_test "testFindSeparator.cpp")
endif()
diff --git a/gtsam_unstable/slam/ProjectionFactorPPPC.h b/gtsam_unstable/slam/ProjectionFactorPPPC.h
index 5b6a83a33..df63330df 100644
--- a/gtsam_unstable/slam/ProjectionFactorPPPC.h
+++ b/gtsam_unstable/slam/ProjectionFactorPPPC.h
@@ -32,7 +32,7 @@ namespace gtsam {
* @ingroup slam
*/
template
-class GTSAM_UNSTABLE_EXPORT ProjectionFactorPPPC
+class ProjectionFactorPPPC
: public NoiseModelFactorN {
protected:
Point2 measured_; ///< 2D measurement
diff --git a/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h b/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h
index 4af0be751..4a2047ee1 100644
--- a/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h
+++ b/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h
@@ -42,7 +42,7 @@ namespace gtsam {
* @ingroup slam
*/
template
-class GTSAM_UNSTABLE_EXPORT SmartProjectionPoseFactorRollingShutter
+class SmartProjectionPoseFactorRollingShutter
: public SmartProjectionFactor {
private:
typedef SmartProjectionFactor Base;
diff --git a/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h b/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h
index f9db90953..189c501bb 100644
--- a/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h
+++ b/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h
@@ -201,10 +201,9 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
}
/// linearize and return a Hessianfactor that is an approximation of error(p)
- std::shared_ptr > createHessianFactor(
- const Values& values, const double lambda = 0.0, bool diagonalDamping =
- false) const {
-
+ std::shared_ptr> createHessianFactor(
+ const Values& values, const double lambda = 0.0,
+ bool diagonalDamping = false) const {
// we may have multiple cameras sharing the same extrinsic cals, hence the number
// of keys may be smaller than 2 * nrMeasurements (which is the upper bound where we
// have a body key and an extrinsic calibration key for each measurement)
@@ -212,23 +211,22 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
// Create structures for Hessian Factors
KeyVector js;
- std::vector < Matrix > Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
+ std::vector Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
std::vector gs(nrUniqueKeys);
if (this->measured_.size() != cameras(values).size())
throw std::runtime_error("SmartStereoProjectionHessianFactor: this->"
- "measured_.size() inconsistent with input");
+ "measured_.size() inconsistent with input");
// triangulate 3D point at given linearization point
triangulateSafe(cameras(values));
- if (!result_) { // failed: return "empty/zero" Hessian
- for (Matrix& m : Gs)
- m = Matrix::Zero(DimPose, DimPose);
- for (Vector& v : gs)
- v = Vector::Zero(DimPose);
- return std::make_shared < RegularHessianFactor
- > (keys_, Gs, gs, 0.0);
+ // failed: return "empty/zero" Hessian
+ if (!result_) {
+ for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose);
+ for (Vector& v : gs) v = Vector::Zero(DimPose);
+ return std::make_shared>(keys_, Gs, gs,
+ 0.0);
}
// compute Jacobian given triangulated 3D Point
@@ -239,12 +237,13 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
// Whiten using noise model
noiseModel_->WhitenSystem(E, b);
- for (size_t i = 0; i < Fs.size(); i++)
+ for (size_t i = 0; i < Fs.size(); i++) {
Fs[i] = noiseModel_->Whiten(Fs[i]);
+ }
// build augmented Hessian (with last row/column being the information vector)
Matrix3 P;
- Cameras::ComputePointCovariance <3> (P, E, lambda, diagonalDamping);
+ Cameras::ComputePointCovariance<3>(P, E, lambda, diagonalDamping);
// these are the keys that correspond to the blocks in augmentedHessian (output of SchurComplement)
KeyVector nonuniqueKeys;
@@ -254,11 +253,12 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
}
// but we need to get the augumented hessian wrt the unique keys in key_
SymmetricBlockMatrix augmentedHessianUniqueKeys =
- Cameras::SchurComplementAndRearrangeBlocks<3,DimBlock,DimPose>(Fs,E,P,b,
- nonuniqueKeys, keys_);
+ Base::Cameras::template SchurComplementAndRearrangeBlocks<3, DimBlock,
+ DimPose>(
+ Fs, E, P, b, nonuniqueKeys, keys_);
- return std::make_shared < RegularHessianFactor
- > (keys_, augmentedHessianUniqueKeys);
+ return std::make_shared>(
+ keys_, augmentedHessianUniqueKeys);
}
/**
diff --git a/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp b/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp
index 1eceb8061..872cd2dea 100644
--- a/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp
+++ b/gtsam_unstable/slam/tests/testSmartStereoProjectionPoseFactor.cpp
@@ -1441,14 +1441,14 @@ TEST( SmartStereoProjectionPoseFactor, HessianWithRotationNonDegenerate ) {
std::shared_ptr hessianFactorRotTran =
smartFactor->linearize(tranValues);
- // Hessian is invariant to rotations and translations in the degenerate case
- EXPECT(
- assert_equal(hessianFactor->information(),
+ double error;
#ifdef GTSAM_USE_EIGEN_MKL
- hessianFactorRotTran->information(), 1e-5));
+ error = 1e-5;
#else
- hessianFactorRotTran->information(), 1e-6));
+ error = 1e-6;
#endif
+ // Hessian is invariant to rotations and translations in the degenerate case
+ EXPECT(assert_equal(hessianFactor->information(), hessianFactorRotTran->information(), error));
}
/* ************************************************************************* */
diff --git a/gtsam_unstable/timing/CMakeLists.txt b/gtsam_unstable/timing/CMakeLists.txt
index b5130ae92..44ea2f5af 100644
--- a/gtsam_unstable/timing/CMakeLists.txt
+++ b/gtsam_unstable/timing/CMakeLists.txt
@@ -1 +1 @@
-gtsamAddTimingGlob("*.cpp" "" "gtsam_unstable")
+gtsamAddTimingGlob("*.cpp" "" "gtsam_unstable" ${GTSAM_BUILD_TIMING_ALWAYS})
diff --git a/matlab/CMakeLists.txt b/matlab/CMakeLists.txt
index 45d56e41d..e38d7cb6f 100644
--- a/matlab/CMakeLists.txt
+++ b/matlab/CMakeLists.txt
@@ -64,6 +64,10 @@ set(ignore
gtsam::Point3
gtsam::CustomFactor)
+if (APPLE OR WIN32)
+ list(APPEND ignore gtsam::Symbol)
+endif()
+
set(interface_files
${GTSAM_SOURCE_DIR}/gtsam/gtsam.i
${GTSAM_SOURCE_DIR}/gtsam/base/base.i
diff --git a/matlab/README.md b/matlab/README.md
index 25628b633..ac0956cb7 100644
--- a/matlab/README.md
+++ b/matlab/README.md
@@ -10,6 +10,14 @@ The interface generated by the wrap tool also redirects the standard output stre
For instructions on updating the version of the [wrap library](https://github.com/borglab/wrap) included in GTSAM to the latest version, please refer to the [wrap README](https://github.com/borglab/wrap/blob/master/README.md#git-subtree-and-contributing)
+### Filename Case Sensitivity
+
+Due to the file name syntax requirements of Matlab, having the files `Symbol.m` (for the class) and `symbol.m` (for the function) together on an OS with a case-insensitive filesystem is impossible.
+
+In short, on an OS such as Windows or macOS whose filesystem is case-insensitive, the two files `symbol.m` and `Symbol.m` cannot coexist in the same folder: writing one after the other simply overwrites the first. We also cannot give them two different filenames, since Matlab requires the filename to match the class/function name.
+
+For this reason, on Windows and macOS we ignore the `Symbol` class and ask users to use the `symbol` function exclusively for key creation.
+
## Ubuntu
If you have a newer Ubuntu system (later than 10.04), you must make a small modification to your MATLAB installation, due to MATLAB being distributed with an old version of the C++ standard library. Delete or rename all files starting with `libstdc++` in your MATLAB installation directory, in paths:
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 2b2abf507..f874c2f21 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -1,3 +1,4 @@
+set(PROJECT_PYTHON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/python)
set(GTSAM_PYTHON_BUILD_DIRECTORY ${PROJECT_BINARY_DIR}/python)
if (NOT GTSAM_BUILD_PYTHON)
@@ -6,11 +7,11 @@ endif()
# Generate setup.py.
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
-configure_file(${PROJECT_SOURCE_DIR}/python/setup.py.in
+configure_file(${PROJECT_PYTHON_SOURCE_DIR}/setup.py.in
${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py)
# Supply MANIFEST.in for older versions of Python
-file(COPY ${PROJECT_SOURCE_DIR}/python/MANIFEST.in
+file(COPY ${PROJECT_PYTHON_SOURCE_DIR}/MANIFEST.in
DESTINATION ${GTSAM_PYTHON_BUILD_DIRECTORY})
set(WRAP_BUILD_TYPE_POSTFIXES ${GTSAM_BUILD_TYPE_POSTFIXES})
@@ -99,7 +100,7 @@ pybind_wrap(${GTSAM_PYTHON_TARGET} # target
"gtsam" # module_name
"gtsam" # top_namespace
"${ignore}" # ignore_classes
- ${PROJECT_SOURCE_DIR}/python/gtsam/gtsam.tpl
+ ${PROJECT_PYTHON_SOURCE_DIR}/gtsam/gtsam.tpl
gtsam # libs
"gtsam;gtsam_header" # dependencies
${GTSAM_ENABLE_BOOST_SERIALIZATION} # use_boost_serialization
@@ -178,7 +179,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
"gtsam_unstable" # module_name
"gtsam" # top_namespace
"${ignore}" # ignore_classes
- ${PROJECT_SOURCE_DIR}/python/gtsam_unstable/gtsam_unstable.tpl
+ ${PROJECT_PYTHON_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.tpl
gtsam_unstable # libs
"gtsam_unstable;gtsam_unstable_header" # dependencies
${GTSAM_ENABLE_BOOST_SERIALIZATION} # use_boost_serialization
diff --git a/python/CustomFactors.md b/python/CustomFactors.md
index a6ffa2f36..39a840e34 100644
--- a/python/CustomFactors.md
+++ b/python/CustomFactors.md
@@ -11,17 +11,18 @@ import gtsam
import numpy as np
from typing import List
-def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
+def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray:
...
```
`this` is a reference to the `CustomFactor` object. This is required because one can reuse the same
`error_func` for multiple factors. `v` is a reference to the current set of values, and `H` is a list of
-**references** to the list of required Jacobians (see the corresponding C++ documentation).
+**references** to the list of required Jacobians (see the corresponding C++ documentation). Note that
+the error returned must be a 1D numpy array.
If `H` is `None`, it means the current factor evaluation does not need Jacobians. For example, the `error`
method on a factor does not need Jacobians, so we don't evaluate them to save CPU. If `H` is not `None`,
-each entry of `H` can be assigned a `numpy` array, as the Jacobian for the corresponding variable.
+each entry of `H` can be assigned a (2D) `numpy` array, as the Jacobian for the corresponding variable.
After defining `error_func`, one can create a `CustomFactor` just like any other factor in GTSAM:
@@ -42,7 +43,7 @@ from typing import List
expected = Pose2(2, 2, np.pi / 2)
-def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
+def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray:
"""
Error function that mimics a BetweenFactor
:param this: reference to the current CustomFactor being evaluated
diff --git a/python/README.md b/python/README.md
index 278d62094..e81be74fc 100644
--- a/python/README.md
+++ b/python/README.md
@@ -16,7 +16,7 @@ For instructions on updating the version of the [wrap library](https://github.co
- This wrapper needs `pyparsing(>=2.4.2)`, and `numpy(>=1.11.0)`. These can be installed as follows:
```bash
- pip install -r /python/requirements.txt
+ pip install -r /python/dev_requirements.txt
```
## Install
diff --git a/python/dev_requirements.txt b/python/dev_requirements.txt
new file mode 100644
index 000000000..6970ee613
--- /dev/null
+++ b/python/dev_requirements.txt
@@ -0,0 +1,2 @@
+-r requirements.txt
+pyparsing>=2.4.2
\ No newline at end of file
diff --git a/python/gtsam/examples/CustomFactorExample.py b/python/gtsam/examples/CustomFactorExample.py
index c7ae8ad21..a78562ecb 100644
--- a/python/gtsam/examples/CustomFactorExample.py
+++ b/python/gtsam/examples/CustomFactorExample.py
@@ -15,7 +15,7 @@ from typing import List, Optional
import gtsam
import numpy as np
-I = np.eye(1)
+I = np.eye(1) # Creates a 1-element, 2D array
def simulate_car() -> List[float]:
@@ -30,7 +30,7 @@ def simulate_car() -> List[float]:
def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor,
values: gtsam.Values,
- jacobians: Optional[List[np.ndarray]]) -> float:
+ jacobians: Optional[List[np.ndarray]]) -> np.ndarray:
"""GPS Factor error function
:param measurement: GPS measurement, to be filled with `partial`
:param this: gtsam.CustomFactor handle
@@ -44,12 +44,12 @@ def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor,
if jacobians is not None:
jacobians[0] = I
- return error
+ return error # with input types this is a 1D np.ndarray
def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor,
values: gtsam.Values,
- jacobians: Optional[List[np.ndarray]]) -> float:
+ jacobians: Optional[List[np.ndarray]]) -> np.ndarray:
"""Odometry Factor error function
:param measurement: Odometry measurement, to be filled with `partial`
:param this: gtsam.CustomFactor handle
@@ -70,7 +70,7 @@ def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor,
def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor,
values: gtsam.Values,
- jacobians: Optional[List[np.ndarray]]) -> float:
+ jacobians: Optional[List[np.ndarray]]) -> np.ndarray:
"""Landmark Factor error function
:param measurement: Landmark measurement, to be filled with `partial`
:param this: gtsam.CustomFactor handle
diff --git a/python/gtsam/tests/test_DsfTrackGenerator.py b/python/gtsam/tests/test_DsfTrackGenerator.py
index be6aa0796..618f8a8cc 100644
--- a/python/gtsam/tests/test_DsfTrackGenerator.py
+++ b/python/gtsam/tests/test_DsfTrackGenerator.py
@@ -4,6 +4,7 @@ Authors: John Lambert
"""
import unittest
+from typing import Dict, List, Tuple
import numpy as np
from gtsam.gtsfm import Keypoints
@@ -16,6 +17,29 @@ from gtsam import IndexPair, Point2, SfmTrack2d
class TestDsfTrackGenerator(GtsamTestCase):
"""Tests for DsfTrackGenerator."""
+ def test_generate_tracks_from_pairwise_matches_nontransitive(
+ self,
+ ) -> None:
+ """Tests DSF for non-transitive matches.
+
+ Test will result in no tracks since nontransitive tracks are naively discarded by DSF.
+ """
+ keypoints_list = get_dummy_keypoints_list()
+ nontransitive_matches_dict = get_nontransitive_matches() # contains one non-transitive track
+
+ # For each image pair (i1,i2), we provide a (K,2) matrix
+ # of corresponding keypoint indices (k1,k2).
+ matches_dict = {}
+ for (i1,i2), corr_idxs in nontransitive_matches_dict.items():
+ matches_dict[IndexPair(i1, i2)] = corr_idxs
+
+ tracks = gtsam.gtsfm.tracksFromPairwiseMatches(
+ matches_dict,
+ keypoints_list,
+ verbose=True,
+ )
+ self.assertEqual(len(tracks), 0, "Tracks not filtered correctly")
+
def test_track_generation(self) -> None:
"""Ensures that DSF generates three tracks from measurements
in 3 images (H=200,W=400)."""
@@ -29,7 +53,7 @@ class TestDsfTrackGenerator(GtsamTestCase):
keypoints_list.append(kps_i2)
# For each image pair (i1,i2), we provide a (K,2) matrix
- # of corresponding image indices (k1,k2).
+ # of corresponding keypoint indices (k1,k2).
matches_dict = {}
matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]])
matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]])
@@ -93,5 +117,73 @@ class TestSfmTrack2d(GtsamTestCase):
assert track.numberMeasurements() == 1
+def get_dummy_keypoints_list() -> List[Keypoints]:
+ """ """
+ img1_kp_coords = np.array([[1, 1], [2, 2], [3, 3.]])
+ img1_kp_scale = np.array([6.0, 9.0, 8.5])
+ img2_kp_coords = np.array(
+ [
+ [1, 1.],
+ [2, 2],
+ [3, 3],
+ [4, 4],
+ [5, 5],
+ [6, 6],
+ [7, 7],
+ [8, 8],
+ ]
+ )
+ img3_kp_coords = np.array(
+ [
+ [1, 1.],
+ [2, 2],
+ [3, 3],
+ [4, 4],
+ [5, 5],
+ [6, 6],
+ [7, 7],
+ [8, 8],
+ [9, 9],
+ [10, 10],
+ ]
+ )
+ img4_kp_coords = np.array(
+ [
+ [1, 1.],
+ [2, 2],
+ [3, 3],
+ [4, 4],
+ [5, 5],
+ ]
+ )
+ keypoints_list = [
+ Keypoints(coordinates=img1_kp_coords),
+ Keypoints(coordinates=img2_kp_coords),
+ Keypoints(coordinates=img3_kp_coords),
+ Keypoints(coordinates=img4_kp_coords),
+ ]
+ return keypoints_list
+
+
+def get_nontransitive_matches() -> Dict[Tuple[int, int], np.ndarray]:
+ """Set up correspondences for each (i1,i2) pair that violates transitivity.
+
+ (i=0, k=0) (i=0, k=1)
+ | \\ |
+ | \\ |
+ (i=1, k=2)--(i=2,k=3)--(i=3, k=4)
+
+ Transitivity is violated due to the match between frames 0 and 3.
+ """
+ nontransitive_matches_dict = {
+ (0, 1): np.array([[0, 2]]),
+ (1, 2): np.array([[2, 3]]),
+ (0, 2): np.array([[0, 3]]),
+ (0, 3): np.array([[1, 4]]),
+ (2, 3): np.array([[3, 4]]),
+ }
+ return nontransitive_matches_dict
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/python/gtsam/tests/test_Robust.py b/python/gtsam/tests/test_Robust.py
new file mode 100644
index 000000000..c2ab9d4a4
--- /dev/null
+++ b/python/gtsam/tests/test_Robust.py
@@ -0,0 +1,43 @@
+"""
+GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
+Atlanta, Georgia 30332-0415
+All Rights Reserved
+
+See LICENSE for the license information
+
+Simple unit test for custom robust noise model.
+Author: Fan Jiang
+"""
+import unittest
+
+import gtsam
+import numpy as np
+from gtsam.utils.test_case import GtsamTestCase
+
+
+class TestRobust(GtsamTestCase):
+
+ def test_RobustLossAndWeight(self):
+ k = 10.0
+
+ def custom_weight(e):
+ abs_e = abs(e)
+ return 1.0 if abs_e <= k else k / abs_e
+
+ def custom_loss(e):
+ abs_e = abs(e)
+ return abs_e * abs_e / 2.0 if abs_e <= k else k * abs_e - k * k / 2.0
+
+ custom_robust = gtsam.noiseModel.Robust.Create(
+ gtsam.noiseModel.mEstimator.Custom(custom_weight, custom_loss,
+ gtsam.noiseModel.mEstimator.Base.ReweightScheme.Scalar,
+ "huber"),
+ gtsam.noiseModel.Isotropic.Sigma(1, 2.0))
+ f = gtsam.PriorFactorDouble(0, 1.0, custom_robust)
+ v = gtsam.Values()
+ v.insert(0, 0.0)
+
+ self.assertAlmostEquals(f.error(v), 0.125)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/python/gtsam/tests/test_backwards_compatibility.py b/python/gtsam/tests/test_backwards_compatibility.py
index 414b65e8c..ca96cdf57 100644
--- a/python/gtsam/tests/test_backwards_compatibility.py
+++ b/python/gtsam/tests/test_backwards_compatibility.py
@@ -29,7 +29,7 @@ UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
class TestBackwardsCompatibility(GtsamTestCase):
- """Tests for the backwards compatibility for the Python wrapper."""
+ """Tests for backwards compatibility of the Python wrapper."""
def setUp(self):
"""Setup test fixtures"""
diff --git a/python/requirements.txt b/python/requirements.txt
index 481d27d8e..099cc80d6 100644
--- a/python/requirements.txt
+++ b/python/requirements.txt
@@ -1,2 +1 @@
numpy>=1.11.0
-pyparsing>=2.4.2
diff --git a/python/setup.py.in b/python/setup.py.in
index 9aa4b71f4..e15e39075 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -1,19 +1,17 @@
"""Setup file to install the GTSAM package."""
-try:
- from setuptools import setup, find_packages
-except ImportError:
- from distutils.core import setup, find_packages
+from setuptools import setup, find_namespace_packages
-packages = find_packages(where=".")
+packages = find_namespace_packages(
+ where=".",
+ exclude=('build', 'build.*', 'CMakeFiles', 'CMakeFiles.*',
+ 'gtsam.notebooks', '*.preamble', '*.specializations', 'dist'))
print("PACKAGES: ", packages)
package_data = {
'': [
"./*.so",
- "./*.dll",
- "Data/*" # Add the data files to the package
- "Data/**/*" # Add the data files in subdirectories
+ "./*.dll"
]
}
@@ -41,7 +39,6 @@ setup(
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'License :: OSI Approved :: BSD License',
- 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
packages=packages,
diff --git a/tests/testGaussianISAM2.cpp b/tests/testGaussianISAM2.cpp
index 5331b7dc4..1ea60dac9 100644
--- a/tests/testGaussianISAM2.cpp
+++ b/tests/testGaussianISAM2.cpp
@@ -11,6 +11,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -657,6 +658,77 @@ namespace {
bool ok = treeEqual && /*linEqual &&*/ nonlinEqual && /*linCorrect &&*/ /*afterLinCorrect &&*/ afterNonlinCorrect;
return ok;
}
+
+ std::optional> createOrderingConstraints(const ISAM2& isam, const KeyVector& newKeys, const KeySet& marginalizableKeys)
+ {
+ if (marginalizableKeys.empty()) {
+ return {};
+ } else {
+ FastMap constrainedKeys = FastMap();
+ // Generate ordering constraints so that the marginalizable variables will be eliminated first
+ // Set all existing and new variables to Group1
+ for (const auto& key_val : isam.getDelta()) {
+ constrainedKeys.emplace(key_val.first, 1);
+ }
+ for (const auto& key : newKeys) {
+ constrainedKeys.emplace(key, 1);
+ }
+ // And then re-assign the marginalizable variables to Group0 so that they'll all be leaf nodes
+ for (const auto& key : marginalizableKeys) {
+ constrainedKeys.at(key) = 0;
+ }
+ return constrainedKeys;
+ }
+ }
+
+ void markAffectedKeys(const Key& key, const ISAM2Clique::shared_ptr& rootClique, KeyList& additionalKeys)
+ {
+ std::stack frontier;
+ frontier.push(rootClique);
+ // Basic DFS to find additional keys
+ while (!frontier.empty()) {
+ // Get the top of the stack
+ const ISAM2Clique::shared_ptr clique = frontier.top();
+ frontier.pop();
+ // Check if we have more keys and children to add
+ if (std::find(clique->conditional()->beginParents(), clique->conditional()->endParents(), key) !=
+ clique->conditional()->endParents()) {
+ for (Key i : clique->conditional()->frontals()) {
+ additionalKeys.push_back(i);
+ }
+ for (const ISAM2Clique::shared_ptr& child : clique->children) {
+ frontier.push(child);
+ }
+ }
+ }
+ }
+
+ bool updateAndMarginalize(const NonlinearFactorGraph& newFactors, const Values& newValues, const KeySet& marginalizableKeys, ISAM2& isam)
+ {
+ // Force ISAM2 to put marginalizable variables at the beginning
+ auto orderingConstraints = createOrderingConstraints(isam, newValues.keys(), marginalizableKeys);
+
+ // Mark additional keys between the marginalized keys and the leaves
+ KeyList markedKeys;
+ for (Key key : marginalizableKeys) {
+ markedKeys.push_back(key);
+ ISAM2Clique::shared_ptr clique = isam[key];
+ for (const ISAM2Clique::shared_ptr& child : clique->children) {
+ markAffectedKeys(key, child, markedKeys);
+ }
+ }
+
+ // Update
+ isam.update(newFactors, newValues, FactorIndices{}, orderingConstraints, {}, markedKeys);
+
+ if (!marginalizableKeys.empty()) {
+ FastList leafKeys(marginalizableKeys.begin(), marginalizableKeys.end());
+ return checkMarginalizeLeaves(isam, leafKeys);
+ }
+ else {
+ return true;
+ }
+ }
}
/* ************************************************************************* */
@@ -790,6 +862,116 @@ TEST(ISAM2, marginalizeLeaves5)
EXPECT(checkMarginalizeLeaves(isam, marginalizeKeys));
}
+/* ************************************************************************* */
+TEST(ISAM2, marginalizeLeaves6)
+{
+ auto nm = noiseModel::Isotropic::Sigma(6, 1.0);
+
+ int gridDim = 10;
+
+ auto idxToKey = [gridDim](int i, int j){return i * gridDim + j;};
+
+ NonlinearFactorGraph factors;
+ Values values;
+ ISAM2 isam;
+
+ // Create a grid of pose variables
+ for (int i = 0; i < gridDim; ++i) {
+ for (int j = 0; j < gridDim; ++j) {
+ Pose3 pose = Pose3(Rot3::Identity(), Point3(i, j, 0));
+ Key key = idxToKey(i, j);
+ // Place a prior on the first pose
+ factors.addPrior(key, Pose3(Rot3::Identity(), Point3(i, j, 0)), nm);
+ values.insert(key, pose);
+ if (i > 0) {
+ factors.emplace_shared>(idxToKey(i - 1, j), key, Pose3(Rot3::Identity(), Point3(1, 0, 0)),nm);
+ }
+ if (j > 0) {
+ factors.emplace_shared>(idxToKey(i, j - 1), key, Pose3(Rot3::Identity(), Point3(0, 1, 0)),nm);
+ }
+ }
+ }
+
+ // Optimize the graph
+ EXPECT(updateAndMarginalize(factors, values, {}, isam));
+ auto estimate = isam.calculateBestEstimate();
+
+ // Get the list of keys
+ std::vector key_list(gridDim * gridDim);
+ std::iota(key_list.begin(), key_list.end(), 0);
+
+ // Shuffle the keys -> we will marginalize the keys one by one in this order
+ std::shuffle(key_list.begin(), key_list.end(), std::default_random_engine(1234));
+
+ for (const auto& key : key_list) {
+ KeySet marginalKeys;
+ marginalKeys.insert(key);
+ EXPECT(updateAndMarginalize({}, {}, marginalKeys, isam));
+ estimate = isam.calculateBestEstimate();
+ }
+}
+
+/* ************************************************************************* */
+TEST(ISAM2, MarginalizeRoot)
+{
+ auto nm = noiseModel::Isotropic::Sigma(6, 1.0);
+
+ NonlinearFactorGraph factors;
+ Values values;
+ ISAM2 isam;
+
+ // Create a factor graph with one variable and a prior
+ Pose3 root = Pose3::Identity();
+ Key rootKey(0);
+ values.insert(rootKey, root);
+ factors.addPrior(rootKey, Pose3::Identity(), nm);
+
+ // Optimize the graph
+ EXPECT(updateAndMarginalize(factors, values, {}, isam));
+ auto estimate = isam.calculateBestEstimate();
+ EXPECT(estimate.size() == 1);
+
+ // And remove the node from the graph
+ KeySet marginalizableKeys;
+ marginalizableKeys.insert(rootKey);
+
+ EXPECT(updateAndMarginalize({}, {}, marginalizableKeys, isam));
+
+ estimate = isam.calculateBestEstimate();
+ EXPECT(estimate.empty());
+}
+
+/* ************************************************************************* */
+TEST(ISAM2, marginalizationSize)
+{
+ auto nm = noiseModel::Isotropic::Sigma(6, 1.0);
+
+ NonlinearFactorGraph factors;
+ Values values;
+ ISAM2Params params;
+ params.findUnusedFactorSlots = true;
+ ISAM2 isam{params};
+
+ // Create a pose variable
+ Key aKey(0);
+ values.insert(aKey, Pose3::Identity());
+ factors.addPrior(aKey, Pose3::Identity(), nm);
+ // Create another pose variable linked to the first
+ Pose3 b = Pose3::Identity();
+ Key bKey(1);
+ values.insert(bKey, Pose3::Identity());
+ factors.emplace_shared>(aKey, bKey, Pose3::Identity(), nm);
+ // Optimize the graph
+ EXPECT(updateAndMarginalize(factors, values, {}, isam));
+
+ // Now remove a variable -> we should not see the number of factors increase
+ gtsam::KeySet to_remove;
+ to_remove.insert(aKey);
+ const auto numFactorsBefore = isam.getFactorsUnsafe().size();
+ updateAndMarginalize({}, {}, to_remove, isam);
+ EXPECT(numFactorsBefore == isam.getFactorsUnsafe().size());
+}
+
/* ************************************************************************* */
TEST(ISAM2, marginalCovariance)
{
diff --git a/tests/testNonlinearFactor.cpp b/tests/testNonlinearFactor.cpp
index 06ae3366c..cca78b80e 100644
--- a/tests/testNonlinearFactor.cpp
+++ b/tests/testNonlinearFactor.cpp
@@ -323,7 +323,7 @@ TEST( NonlinearFactor, cloneWithNewNoiseModel )
// create actual
NonlinearFactorGraph actual;
SharedNoiseModel noise2 = noiseModel::Isotropic::Sigma(2,sigma2);
- actual.push_back( std::dynamic_pointer_cast(nfg[0])->cloneWithNewNoiseModel(noise2) );
+ actual.push_back(nfg.at(0)->cloneWithNewNoiseModel(noise2));
// check it's all good
CHECK(assert_equal(expected, actual));
diff --git a/timing/CMakeLists.txt b/timing/CMakeLists.txt
index e3f97ee0c..f4fbae940 100644
--- a/timing/CMakeLists.txt
+++ b/timing/CMakeLists.txt
@@ -14,6 +14,6 @@ if (NOT GTSAM_USE_BOOST_FEATURES)
)
endif()
-gtsamAddTimingGlob("*.cpp" "${excluded_scripts}" "gtsam")
+gtsamAddTimingGlob("*.cpp" "${excluded_scripts}" "gtsam" ${GTSAM_BUILD_TIMING_ALWAYS})
target_link_libraries(timeGaussianFactorGraph CppUnitLite)
diff --git a/wrap/.gitignore b/wrap/.gitignore
index 9f79deafa..de16b118d 100644
--- a/wrap/.gitignore
+++ b/wrap/.gitignore
@@ -5,6 +5,8 @@ __pycache__/
*dist*
*.egg-info
+**/.DS_Store
+
# Files related to code coverage stats
**/.coverage
diff --git a/wrap/gtwrap/interface_parser/function.py b/wrap/gtwrap/interface_parser/function.py
index b3f5d3227..b40884488 100644
--- a/wrap/gtwrap/interface_parser/function.py
+++ b/wrap/gtwrap/interface_parser/function.py
@@ -12,7 +12,8 @@ Author: Duy Nguyen Ta, Fan Jiang, Matthew Sklar, Varun Agrawal, and Frank Dellae
from typing import Any, Iterable, List, Union
-from pyparsing import Optional, ParseResults, delimitedList # type: ignore
+from pyparsing import (Literal, Optional, ParseResults, # type: ignore
+ delimitedList)
from .template import Template
from .tokens import (COMMA, DEFAULT_ARG, EQUAL, IDENT, LOPBRACK, LPAREN, PAIR,
@@ -105,8 +106,10 @@ class ReturnType:
The return type can either be a single type or a pair such as .
"""
+ # rule to parse optional std:: in front of `pair`
+ optional_std = Optional(Literal('std::')).suppress()
_pair = (
- PAIR.suppress() #
+ optional_std + PAIR.suppress() #
+ LOPBRACK #
+ Type.rule("type1") #
+ COMMA #
diff --git a/wrap/gtwrap/matlab_wrapper/wrapper.py b/wrap/gtwrap/matlab_wrapper/wrapper.py
index 146209c44..9cd367cf3 100755
--- a/wrap/gtwrap/matlab_wrapper/wrapper.py
+++ b/wrap/gtwrap/matlab_wrapper/wrapper.py
@@ -1284,7 +1284,8 @@ class MatlabWrapper(CheckMixin, FormatMixin):
[instantiated_class.name])
else:
# Get the full namespace
- class_name = ".".join(instantiated_class.parent.full_namespaces()[1:])
+ class_name = ".".join(
+ instantiated_class.parent.full_namespaces()[1:])
if class_name != "":
class_name += '.'
@@ -1860,6 +1861,7 @@ class MatlabWrapper(CheckMixin, FormatMixin):
"""
for c in cc_content:
if isinstance(c, list):
+ # c is a namespace
if len(c) == 0:
continue
@@ -1875,6 +1877,7 @@ class MatlabWrapper(CheckMixin, FormatMixin):
self.generate_content(sub_content[1], path_to_folder)
elif isinstance(c[1], list):
+ # c is a wrapped function
path_to_folder = osp.join(path, c[0])
if not osp.isdir(path_to_folder):
@@ -1882,11 +1885,13 @@ class MatlabWrapper(CheckMixin, FormatMixin):
os.makedirs(path_to_folder, exist_ok=True)
except OSError:
pass
+
for sub_content in c[1]:
path_to_file = osp.join(path_to_folder, sub_content[0])
with open(path_to_file, 'w') as f:
f.write(sub_content[1])
else:
+ # c is a wrapped class
path_to_file = osp.join(path, c[0])
if not osp.isdir(path_to_file):
@@ -1921,6 +1926,8 @@ class MatlabWrapper(CheckMixin, FormatMixin):
for module in modules.values():
# Wrap the full namespace
self.wrap_namespace(module)
+
+ # Generate the wrapping code (both C++ and .m files)
self.generate_wrapper(module)
# Generate the corresponding .m and .cpp files
diff --git a/wrap/pybind11/.clang-tidy b/wrap/pybind11/.clang-tidy
index d945a4a27..23018386c 100644
--- a/wrap/pybind11/.clang-tidy
+++ b/wrap/pybind11/.clang-tidy
@@ -63,6 +63,8 @@ Checks: |
-bugprone-unused-raii,
CheckOptions:
+- key: modernize-use-equals-default.IgnoreMacros
+ value: false
- key: performance-for-range-copy.WarnOnAllAutoCopies
value: true
- key: performance-inefficient-string-concatenation.StrictMode
diff --git a/wrap/pybind11/.codespell-ignore-lines b/wrap/pybind11/.codespell-ignore-lines
new file mode 100644
index 000000000..2a01d63eb
--- /dev/null
+++ b/wrap/pybind11/.codespell-ignore-lines
@@ -0,0 +1,24 @@
+template
+ template
+ auto &this_ = static_cast(*this);
+ if (load_impl(temp, false)) {
+ ssize_t nd = 0;
+ auto trivial = broadcast(buffers, nd, shape);
+ auto ndim = (size_t) nd;
+ int nd;
+ ssize_t ndim() const { return detail::array_proxy(m_ptr)->nd; }
+ using op = op_impl;
+template
+ template
+ class_ &def(const detail::op_ &op, const Extra &...extra) {
+ class_ &def_cast(const detail::op_ &op, const Extra &...extra) {
+@pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"])
+struct IntStruct {
+ explicit IntStruct(int v) : value(v){};
+ ~IntStruct() { value = -value; }
+ IntStruct(const IntStruct &) = default;
+ IntStruct &operator=(const IntStruct &) = default;
+ py::class_(m, "IntStruct").def(py::init([](const int i) { return IntStruct(i); }));
+ py::implicitly_convertible();
+ m.def("test", [](int expected, const IntStruct &in) {
+ [](int expected, const IntStruct &in) {
diff --git a/wrap/pybind11/.github/CONTRIBUTING.md b/wrap/pybind11/.github/CONTRIBUTING.md
index 00b1fea4c..ad7974395 100644
--- a/wrap/pybind11/.github/CONTRIBUTING.md
+++ b/wrap/pybind11/.github/CONTRIBUTING.md
@@ -235,8 +235,8 @@ directory inside your pybind11 git clone. Files will be modified in place,
so you can use git to monitor the changes.
```bash
-docker run --rm -v $PWD:/mounted_pybind11 -it silkeh/clang:13
-apt-get update && apt-get install -y python3-dev python3-pytest
+docker run --rm -v $PWD:/mounted_pybind11 -it silkeh/clang:15-bullseye
+apt-get update && apt-get install -y git python3-dev python3-pytest
cmake -S /mounted_pybind11/ -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);--use-color" -DDOWNLOAD_EIGEN=ON -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=17
cmake --build build -j 2
```
diff --git a/wrap/pybind11/.github/ISSUE_TEMPLATE/bug-report.yml b/wrap/pybind11/.github/ISSUE_TEMPLATE/bug-report.yml
index bd6a9a8e2..4f1e78f33 100644
--- a/wrap/pybind11/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/wrap/pybind11/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -6,7 +6,8 @@ body:
- type: markdown
attributes:
value: |
- Maintainers will only make a best effort to triage PRs. Please do your best to make the issue as easy to act on as possible, and only open if clearly a problem with pybind11 (ask first if unsure).
+ Please do your best to make the issue as easy to act on as possible, and only submit here if there is clearly a problem with pybind11 (ask first if unsure). **Note that a reproducer in a PR is much more likely to get immediate attention.**
+
- type: checkboxes
id: steps
attributes:
@@ -20,6 +21,13 @@ body:
- label: Consider asking first in the [Gitter chat room](https://gitter.im/pybind/Lobby) or in a [Discussion](https:/pybind/pybind11/discussions/new).
required: false
+ - type: input
+ id: version
+ attributes:
+ label: What version (or hash if on master) of pybind11 are you using?
+ validations:
+ required: true
+
- type: textarea
id: description
attributes:
@@ -40,6 +48,14 @@ body:
The code should be minimal, have no external dependencies, isolate the
function(s) that cause breakage. Submit matched and complete C++ and
Python snippets that can be easily compiled and run to diagnose the
- issue. If possible, make a PR with a new, failing test to give us a
- starting point to work on!
+ issue. — Note that a reproducer in a PR is much more likely to get
+ immediate attention: failing tests in the pybind11 CI are the best
+ starting point for working out fixes.
render: text
+
+ - type: input
+ id: regression
+ attributes:
+ label: Is this a regression? Put the last known working version here if it is.
+ description: Put the last known working version here if this is a regression.
+ value: Not a regression
diff --git a/wrap/pybind11/.github/workflows/ci.yml b/wrap/pybind11/.github/workflows/ci.yml
index 412282a4f..48f7c5e93 100644
--- a/wrap/pybind11/.github/workflows/ci.yml
+++ b/wrap/pybind11/.github/workflows/ci.yml
@@ -9,14 +9,19 @@ on:
- stable
- v*
+permissions: read-all
+
concurrency:
group: test-${{ github.ref }}
cancel-in-progress: true
env:
+ PIP_BREAK_SYSTEM_PACKAGES: 1
PIP_ONLY_BINARY: numpy
FORCE_COLOR: 3
PYTEST_TIMEOUT: 300
+ # For cmake:
+ VERBOSE: 1
jobs:
# This is the "main" test suite, which tests a large number of different
@@ -25,15 +30,16 @@ jobs:
strategy:
fail-fast: false
matrix:
- runs-on: [ubuntu-latest, windows-2022, macos-latest]
+ runs-on: [ubuntu-20.04, windows-2022, macos-latest]
python:
- '3.6'
- '3.9'
- '3.10'
- - '3.11-dev'
- - 'pypy-3.7'
+ - '3.11'
+ - '3.12'
- 'pypy-3.8'
- 'pypy-3.9'
+ - 'pypy-3.10'
# Items in here will either be added to the build matrix (if not
# present), or add new keys to an existing matrix element if all the
@@ -42,12 +48,12 @@ jobs:
# We support an optional key: args, for cmake args
include:
# Just add a key
- - runs-on: ubuntu-latest
+ - runs-on: ubuntu-20.04
python: '3.6'
args: >
-DPYBIND11_FINDPYTHON=ON
-DCMAKE_CXX_FLAGS="-D_=1"
- - runs-on: ubuntu-latest
+ - runs-on: ubuntu-20.04
python: 'pypy-3.8'
args: >
-DPYBIND11_FINDPYTHON=ON
@@ -69,6 +75,7 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
+ allow-prereleases: true
- name: Setup Boost (Linux)
# Can't use boost + define _
@@ -80,7 +87,7 @@ jobs:
run: brew install boost
- name: Update CMake
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
- name: Cache wheels
if: runner.os == 'macOS'
@@ -102,10 +109,12 @@ jobs:
run: python -m pip install pytest-github-actions-annotate-failures
# First build - C++11 mode and inplace
+ # More-or-less randomly adding -DPYBIND11_SIMPLE_GIL_MANAGEMENT=ON here.
- name: Configure C++11 ${{ matrix.args }}
run: >
cmake -S . -B .
-DPYBIND11_WERROR=ON
+ -DPYBIND11_SIMPLE_GIL_MANAGEMENT=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=11
@@ -119,7 +128,7 @@ jobs:
- name: C++11 tests
# TODO: Figure out how to load the DLL on Python 3.8+
- if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
+ if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11' || matrix.python == 'pypy-3.8'))"
run: cmake --build . --target cpptest -j 2
- name: Interface test C++11
@@ -129,10 +138,12 @@ jobs:
run: git clean -fdx
# Second build - C++17 mode and in a build directory
+ # More-or-less randomly adding -DPYBIND11_SIMPLE_GIL_MANAGEMENT=OFF here.
- name: Configure C++17
run: >
cmake -S . -B build2
-DPYBIND11_WERROR=ON
+ -DPYBIND11_SIMPLE_GIL_MANAGEMENT=OFF
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
@@ -146,7 +157,7 @@ jobs:
- name: C++ tests
# TODO: Figure out how to load the DLL on Python 3.8+
- if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
+ if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11' || matrix.python == 'pypy-3.8'))"
run: cmake --build build2 --target cpptest
# Third build - C++17 mode with unstable ABI
@@ -158,7 +169,6 @@ jobs:
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
-DPYBIND11_INTERNALS_VERSION=10000000
- "-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
${{ matrix.args }}
- name: Build (unstable ABI)
@@ -173,7 +183,9 @@ jobs:
# This makes sure the setup_helpers module can build packages using
# setuptools
- name: Setuptools helpers test
- run: pytest tests/extra_setuptools
+ run: |
+ pip install setuptools
+ pytest tests/extra_setuptools
if: "!(matrix.runs-on == 'windows-2022')"
@@ -186,23 +198,23 @@ jobs:
- python-version: "3.9"
python-debug: true
valgrind: true
- - python-version: "3.11-dev"
+ - python-version: "3.11"
python-debug: false
name: "🐍 ${{ matrix.python-version }}${{ matrix.python-debug && '-dbg' || '' }} (deadsnakes)${{ matrix.valgrind && ' • Valgrind' || '' }} • x64"
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- name: Setup Python ${{ matrix.python-version }} (deadsnakes)
- uses: deadsnakes/action@v2.1.1
+ uses: deadsnakes/action@v3.0.1
with:
python-version: ${{ matrix.python-version }}
debug: ${{ matrix.python-debug }}
- name: Update CMake
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
- name: Valgrind cache
if: matrix.valgrind
@@ -235,8 +247,6 @@ jobs:
python -m pip install -r tests/requirements.txt
- name: Configure
- env:
- SETUPTOOLS_USE_DISTUTILS: stdlib
run: >
cmake -S . -B build
-DCMAKE_BUILD_TYPE=Debug
@@ -274,18 +284,27 @@ jobs:
- dev
std:
- 11
+ container_suffix:
+ - ""
include:
- clang: 5
std: 14
- - clang: 10
- std: 20
- clang: 10
std: 17
+ - clang: 11
+ std: 20
+ - clang: 12
+ std: 20
+ - clang: 13
+ std: 20
- clang: 14
std: 20
+ - clang: 15
+ std: 20
+ container_suffix: "-bullseye"
name: "🐍 3 • Clang ${{ matrix.clang }} • C++${{ matrix.std }} • x64"
- container: "silkeh/clang:${{ matrix.clang }}"
+ container: "silkeh/clang:${{ matrix.clang }}${{ matrix.container_suffix }}"
steps:
- uses: actions/checkout@v3
@@ -318,8 +337,8 @@ jobs:
# Testing NVCC; forces sources to behave like .cu files
cuda:
runs-on: ubuntu-latest
- name: "🐍 3.8 • CUDA 11.2 • Ubuntu 20.04"
- container: nvidia/cuda:11.2.2-devel-ubuntu20.04
+ name: "🐍 3.10 • CUDA 12.2 • Ubuntu 22.04"
+ container: nvidia/cuda:12.2.0-devel-ubuntu22.04
steps:
- uses: actions/checkout@v3
@@ -384,8 +403,9 @@ jobs:
# Testing on CentOS 7 + PGI compilers, which seems to require more workarounds
centos-nvhpc7:
+ if: ${{ false }} # JOB DISABLED (NEEDS WORK): https://github.com/pybind/pybind11/issues/4690
runs-on: ubuntu-latest
- name: "🐍 3 • CentOS7 / PGI 22.3 • x64"
+ name: "🐍 3 • CentOS7 / PGI 22.9 • x64"
container: centos:7
steps:
@@ -395,7 +415,7 @@ jobs:
run: yum update -y && yum install -y epel-release && yum install -y git python3-devel make environment-modules cmake3 yum-utils
- name: Install NVidia HPC SDK
- run: yum-config-manager --add-repo https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo && yum -y install nvhpc-22.3
+ run: yum-config-manager --add-repo https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo && yum -y install nvhpc-22.9
# On CentOS 7, we have to filter a few tests (compiler internal error)
# and allow deeper template recursion (not needed on CentOS 8 with a newer
@@ -405,12 +425,12 @@ jobs:
shell: bash
run: |
source /etc/profile.d/modules.sh
- module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/22.3
+ module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/22.9
cmake3 -S . -B build -DDOWNLOAD_CATCH=ON \
-DCMAKE_CXX_STANDARD=11 \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") \
-DCMAKE_CXX_FLAGS="-Wc,--pending_instantiations=0" \
- -DPYBIND11_TEST_FILTER="test_smart_ptr.cpp;test_virtual_functions.cpp"
+ -DPYBIND11_TEST_FILTER="test_smart_ptr.cpp"
# Building before installing Pip should produce a warning but not an error
- name: Build
@@ -437,14 +457,14 @@ jobs:
strategy:
fail-fast: false
matrix:
- gcc:
- - 7
- - latest
- std:
- - 11
include:
- - gcc: 10
- std: 20
+ - { gcc: 7, std: 11 }
+ - { gcc: 7, std: 17 }
+ - { gcc: 8, std: 14 }
+ - { gcc: 8, std: 17 }
+ - { gcc: 10, std: 17 }
+ - { gcc: 11, std: 20 }
+ - { gcc: 12, std: 20 }
name: "🐍 3 • GCC ${{ matrix.gcc }} • C++${{ matrix.std }}• x64"
container: "gcc:${{ matrix.gcc }}"
@@ -459,7 +479,7 @@ jobs:
run: python3 -m pip install --upgrade pip
- name: Update CMake
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
- name: Configure
shell: bash
@@ -482,6 +502,24 @@ jobs:
- name: Interface test
run: cmake --build build --target test_cmake_build
+ - name: Configure - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ if: matrix.gcc == '12'
+ shell: bash
+ run: >
+ cmake -S . -B build_partial
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DCMAKE_CXX_STANDARD=${{ matrix.std }}
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+ "-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
+
+ - name: Build - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ if: matrix.gcc == '12'
+ run: cmake --build build_partial -j 2
+
+ - name: Python tests - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ if: matrix.gcc == '12'
+ run: cmake --build build_partial --target pytest
# Testing on ICC using the oneAPI apt repo
icc:
@@ -731,6 +769,9 @@ jobs:
args: -DCMAKE_CXX_STANDARD=20
- python: 3.8
args: -DCMAKE_CXX_STANDARD=17
+ - python: 3.7
+ args: -DCMAKE_CXX_STANDARD=14
+
name: "🐍 ${{ matrix.python }} • MSVC 2019 • x86 ${{ matrix.args }}"
runs-on: windows-2019
@@ -745,10 +786,10 @@ jobs:
architecture: x86
- name: Update CMake
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
- name: Prepare MSVC
- uses: ilammy/msvc-dev-cmd@v1.10.0
+ uses: ilammy/msvc-dev-cmd@v1.12.1
with:
arch: x86
@@ -798,10 +839,10 @@ jobs:
architecture: x86
- name: Update CMake
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
- name: Prepare MSVC
- uses: ilammy/msvc-dev-cmd@v1.10.0
+ uses: ilammy/msvc-dev-cmd@v1.12.1
with:
arch: x86
@@ -849,7 +890,7 @@ jobs:
python3 -m pip install -r tests/requirements.txt
- name: Update CMake
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
- name: Configure C++20
run: >
@@ -871,6 +912,21 @@ jobs:
- name: Interface test C++20
run: cmake --build build --target test_cmake_build
+ - name: Configure C++20 - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ run: >
+ cmake -S . -B build_partial
+ -DPYBIND11_WERROR=ON
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_STANDARD=20
+ "-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
+
+ - name: Build C++20 - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ run: cmake --build build_partial -j 2
+
+ - name: Python tests - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ run: cmake --build build_partial --target pytest
+
mingw:
name: "🐍 3 • windows-latest • ${{ matrix.sys }}"
runs-on: windows-latest
@@ -905,7 +961,7 @@ jobs:
- name: Configure C++11
# LTO leads to many undefined reference like
# `pybind11::detail::function_call::function_call(pybind11::detail::function_call&&)
- run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=11 -DDOWNLOAD_CATCH=ON -S . -B build
+ run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=11 -DPYBIND11_WERROR=ON -DDOWNLOAD_CATCH=ON -S . -B build
- name: Build C++11
run: cmake --build build -j 2
@@ -923,7 +979,7 @@ jobs:
run: git clean -fdx
- name: Configure C++14
- run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=14 -DDOWNLOAD_CATCH=ON -S . -B build2
+ run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=14 -DPYBIND11_WERROR=ON -DDOWNLOAD_CATCH=ON -S . -B build2
- name: Build C++14
run: cmake --build build2 -j 2
@@ -941,7 +997,7 @@ jobs:
run: git clean -fdx
- name: Configure C++17
- run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=17 -DDOWNLOAD_CATCH=ON -S . -B build3
+ run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=17 -DPYBIND11_WERROR=ON -DDOWNLOAD_CATCH=ON -S . -B build3
- name: Build C++17
run: cmake --build build3 -j 2
@@ -954,3 +1010,156 @@ jobs:
- name: Interface test C++17
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build3 --target test_cmake_build
+
+ windows_clang:
+
+ strategy:
+ matrix:
+ os: [windows-latest]
+ python: ['3.10']
+
+ runs-on: "${{ matrix.os }}"
+
+ name: "🐍 ${{ matrix.python }} • ${{ matrix.os }} • clang-latest"
+
+ steps:
+ - name: Show env
+ run: env
+
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Set up Clang
+ uses: egor-tensin/setup-clang@v1
+
+ - name: Setup Python ${{ matrix.python }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.14
+
+ - name: Install ninja-build tool
+ uses: seanmiddleditch/gha-setup-ninja@v3
+
+ - name: Run pip installs
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install -r tests/requirements.txt
+
+ - name: Show Clang++ version
+ run: clang++ --version
+
+ - name: Show CMake version
+ run: cmake --version
+
+ # TODO: WERROR=ON
+ - name: Configure Clang
+ run: >
+ cmake -G Ninja -S . -B .
+ -DPYBIND11_WERROR=OFF
+ -DPYBIND11_SIMPLE_GIL_MANAGEMENT=OFF
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_COMPILER=clang++
+ -DCMAKE_CXX_STANDARD=17
+
+ - name: Build
+ run: cmake --build . -j 2
+
+ - name: Python tests
+ run: cmake --build . --target pytest -j 2
+
+ - name: C++ tests
+ run: cmake --build . --target cpptest -j 2
+
+ - name: Interface test
+ run: cmake --build . --target test_cmake_build -j 2
+
+ - name: Clean directory
+ run: git clean -fdx
+
+ macos_brew_install_llvm:
+ name: "macos-latest • brew install llvm"
+ runs-on: macos-latest
+
+ env:
+ # https://apple.stackexchange.com/questions/227026/how-to-install-recent-clang-with-homebrew
+ LDFLAGS: '-L/usr/local/opt/llvm/lib -Wl,-rpath,/usr/local/opt/llvm/lib'
+
+ steps:
+ - name: Update PATH
+ run: echo "/usr/local/opt/llvm/bin" >> $GITHUB_PATH
+
+ - name: Show env
+ run: env
+
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Show Clang++ version before brew install llvm
+ run: clang++ --version
+
+ - name: brew install llvm
+ run: brew install llvm
+
+ - name: Show Clang++ version after brew install llvm
+ run: clang++ --version
+
+ - name: Update CMake
+ uses: jwlawson/actions-setup-cmake@v1.14
+
+ - name: Run pip installs
+ run: |
+ python3 -m pip install --upgrade pip
+ python3 -m pip install -r tests/requirements.txt
+ python3 -m pip install numpy
+ python3 -m pip install scipy
+
+ - name: Show CMake version
+ run: cmake --version
+
+ - name: CMake Configure
+ run: >
+ cmake -S . -B .
+ -DPYBIND11_WERROR=ON
+ -DPYBIND11_SIMPLE_GIL_MANAGEMENT=OFF
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_COMPILER=clang++
+ -DCMAKE_CXX_STANDARD=17
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+
+ - name: Build
+ run: cmake --build . -j 2
+
+ - name: Python tests
+ run: cmake --build . --target pytest -j 2
+
+ - name: C++ tests
+ run: cmake --build . --target cpptest -j 2
+
+ - name: Interface test
+ run: cmake --build . --target test_cmake_build -j 2
+
+ - name: CMake Configure - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ run: >
+ cmake -S . -B build_partial
+ -DPYBIND11_WERROR=ON
+ -DPYBIND11_SIMPLE_GIL_MANAGEMENT=OFF
+ -DDOWNLOAD_CATCH=ON
+ -DDOWNLOAD_EIGEN=ON
+ -DCMAKE_CXX_COMPILER=clang++
+ -DCMAKE_CXX_STANDARD=17
+ -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
+ "-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
+
+ - name: Build - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ run: cmake --build build_partial -j 2
+
+ - name: Python tests - Exercise cmake -DPYBIND11_TEST_OVERRIDE
+ run: cmake --build build_partial --target pytest -j 2
+
+ - name: Clean directory
+ run: git clean -fdx
diff --git a/wrap/pybind11/.github/workflows/configure.yml b/wrap/pybind11/.github/workflows/configure.yml
index edcad4198..ec7cd612d 100644
--- a/wrap/pybind11/.github/workflows/configure.yml
+++ b/wrap/pybind11/.github/workflows/configure.yml
@@ -9,6 +9,14 @@ on:
- stable
- v*
+permissions:
+ contents: read
+
+env:
+ PIP_BREAK_SYSTEM_PACKAGES: 1
+ # For cmake:
+ VERBOSE: 1
+
jobs:
# This tests various versions of CMake in various combinations, to make sure
# the configure step passes.
@@ -16,22 +24,26 @@ jobs:
strategy:
fail-fast: false
matrix:
- runs-on: [ubuntu-latest, macos-latest, windows-latest]
+ runs-on: [ubuntu-20.04, macos-latest, windows-latest]
arch: [x64]
- cmake: ["3.23"]
+ cmake: ["3.26"]
include:
- - runs-on: ubuntu-latest
+ - runs-on: ubuntu-20.04
arch: x64
- cmake: 3.4
+ cmake: "3.5"
+
+ - runs-on: ubuntu-20.04
+ arch: x64
+ cmake: "3.27"
- runs-on: macos-latest
arch: x64
- cmake: 3.7
+ cmake: "3.7"
- runs-on: windows-2019
arch: x64 # x86 compilers seem to be missing on 2019 image
- cmake: 3.18
+ cmake: "3.18"
name: 🐍 3.7 • CMake ${{ matrix.cmake }} • ${{ matrix.runs-on }}
runs-on: ${{ matrix.runs-on }}
@@ -51,7 +63,7 @@ jobs:
# An action for adding a specific version of CMake:
# https://github.com/jwlawson/actions-setup-cmake
- name: Setup CMake ${{ matrix.cmake }}
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
with:
cmake-version: ${{ matrix.cmake }}
diff --git a/wrap/pybind11/.github/workflows/format.yml b/wrap/pybind11/.github/workflows/format.yml
index 31d893c47..b8242ee52 100644
--- a/wrap/pybind11/.github/workflows/format.yml
+++ b/wrap/pybind11/.github/workflows/format.yml
@@ -12,8 +12,13 @@ on:
- stable
- "v*"
+permissions:
+ contents: read
+
env:
FORCE_COLOR: 3
+ # For cmake:
+ VERBOSE: 1
jobs:
pre-commit:
@@ -36,12 +41,12 @@ jobs:
# in .github/CONTRIBUTING.md and update as needed.
name: Clang-Tidy
runs-on: ubuntu-latest
- container: silkeh/clang:13
+ container: silkeh/clang:15-bullseye
steps:
- uses: actions/checkout@v3
- name: Install requirements
- run: apt-get update && apt-get install -y python3-dev python3-pytest
+ run: apt-get update && apt-get install -y git python3-dev python3-pytest
- name: Configure
run: >
diff --git a/wrap/pybind11/.github/workflows/labeler.yml b/wrap/pybind11/.github/workflows/labeler.yml
index d2b597968..858a4a0e2 100644
--- a/wrap/pybind11/.github/workflows/labeler.yml
+++ b/wrap/pybind11/.github/workflows/labeler.yml
@@ -3,14 +3,23 @@ on:
pull_request_target:
types: [closed]
+permissions: {}
+
jobs:
label:
name: Labeler
runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ pull-requests: write
steps:
- uses: actions/labeler@main
- if: github.event.pull_request.merged == true
+ if: >
+ github.event.pull_request.merged == true &&
+ !startsWith(github.event.pull_request.title, 'chore(deps):') &&
+ !startsWith(github.event.pull_request.title, 'ci(fix):') &&
+ !startsWith(github.event.pull_request.title, 'docs(changelog):')
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
configuration-path: .github/labeler_merged.yml
diff --git a/wrap/pybind11/.github/workflows/pip.yml b/wrap/pybind11/.github/workflows/pip.yml
index 2c16735e1..d6687b441 100644
--- a/wrap/pybind11/.github/workflows/pip.yml
+++ b/wrap/pybind11/.github/workflows/pip.yml
@@ -12,7 +12,11 @@ on:
types:
- published
+permissions:
+ contents: read
+
env:
+ PIP_BREAK_SYSTEM_PACKAGES: 1
PIP_ONLY_BINARY: numpy
jobs:
@@ -98,13 +102,13 @@ jobs:
- uses: actions/download-artifact@v3
- name: Publish standard package
- uses: pypa/gh-action-pypi-publish@v1.5.0
+ uses: pypa/gh-action-pypi-publish@release/v1
with:
password: ${{ secrets.pypi_password }}
- packages_dir: standard/
+ packages-dir: standard/
- name: Publish global package
- uses: pypa/gh-action-pypi-publish@v1.5.0
+ uses: pypa/gh-action-pypi-publish@release/v1
with:
password: ${{ secrets.pypi_password_global }}
- packages_dir: global/
+ packages-dir: global/
diff --git a/wrap/pybind11/.github/workflows/upstream.yml b/wrap/pybind11/.github/workflows/upstream.yml
index a40a6c7d7..dd8a1c960 100644
--- a/wrap/pybind11/.github/workflows/upstream.yml
+++ b/wrap/pybind11/.github/workflows/upstream.yml
@@ -1,112 +1,116 @@
-
name: Upstream
on:
workflow_dispatch:
pull_request:
+permissions:
+ contents: read
+
concurrency:
group: upstream-${{ github.ref }}
cancel-in-progress: true
env:
- PIP_ONLY_BINARY: numpy
+ PIP_BREAK_SYSTEM_PACKAGES: 1
+ PIP_ONLY_BINARY: ":all:"
+ # For cmake:
+ VERBOSE: 1
jobs:
standard:
- name: "🐍 3.11 latest internals • ubuntu-latest • x64"
+ name: "🐍 3.12 latest • ubuntu-latest • x64"
runs-on: ubuntu-latest
+ # Only runs when the 'python dev' label is selected
if: "contains(github.event.pull_request.labels.*.name, 'python dev')"
steps:
- uses: actions/checkout@v3
- - name: Setup Python 3.11
+ - name: Setup Python 3.12
uses: actions/setup-python@v4
with:
- python-version: "3.11-dev"
+ python-version: "3.12-dev"
- - name: Setup Boost (Linux)
- if: runner.os == 'Linux'
+ - name: Setup Boost
run: sudo apt-get install libboost-dev
- name: Update CMake
- uses: jwlawson/actions-setup-cmake@v1.12
+ uses: jwlawson/actions-setup-cmake@v1.14
- - name: Prepare env
+ - name: Run pip installs
run: |
+ python -m pip install --upgrade pip
python -m pip install -r tests/requirements.txt
- - name: Setup annotations on Linux
- if: runner.os == 'Linux'
- run: python -m pip install pytest-github-actions-annotate-failures
+ - name: Show platform info
+ run: |
+ python -m platform
+ cmake --version
+ pip list
# First build - C++11 mode and inplace
- name: Configure C++11
run: >
- cmake -S . -B .
+ cmake -S . -B build11
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=11
+ -DCMAKE_BUILD_TYPE=Debug
- name: Build C++11
- run: cmake --build . -j 2
+ run: cmake --build build11 -j 2
- name: Python tests C++11
- run: cmake --build . --target pytest -j 2
+ run: cmake --build build11 --target pytest -j 2
- name: C++11 tests
- run: cmake --build . --target cpptest -j 2
+ run: cmake --build build11 --target cpptest -j 2
- name: Interface test C++11
- run: cmake --build . --target test_cmake_build
-
- - name: Clean directory
- run: git clean -fdx
+ run: cmake --build build11 --target test_cmake_build
# Second build - C++17 mode and in a build directory
- name: Configure C++17
run: >
- cmake -S . -B build2
+ cmake -S . -B build17
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
- ${{ matrix.args }}
- ${{ matrix.args2 }}
- - name: Build
- run: cmake --build build2 -j 2
+ - name: Build C++17
+ run: cmake --build build17 -j 2
- - name: Python tests
- run: cmake --build build2 --target pytest
+ - name: Python tests C++17
+ run: cmake --build build17 --target pytest
- - name: C++ tests
- run: cmake --build build2 --target cpptest
+ - name: C++17 tests
+ run: cmake --build build17 --target cpptest
# Third build - C++17 mode with unstable ABI
- name: Configure (unstable ABI)
run: >
- cmake -S . -B build3
+ cmake -S . -B build17max
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
-DPYBIND11_INTERNALS_VERSION=10000000
- "-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
- ${{ matrix.args }}
- name: Build (unstable ABI)
- run: cmake --build build3 -j 2
+ run: cmake --build build17max -j 2
- name: Python tests (unstable ABI)
- run: cmake --build build3 --target pytest
+ run: cmake --build build17max --target pytest
- - name: Interface test
- run: cmake --build build2 --target test_cmake_build
+ - name: Interface test (unstable ABI)
+ run: cmake --build build17max --target test_cmake_build
# This makes sure the setup_helpers module can build packages using
# setuptools
- name: Setuptools helpers test
- run: pytest tests/extra_setuptools
+ run: |
+ pip install setuptools
+ pytest tests/extra_setuptools
diff --git a/wrap/pybind11/.gitignore b/wrap/pybind11/.gitignore
index 3cf4fbbda..43d5094c9 100644
--- a/wrap/pybind11/.gitignore
+++ b/wrap/pybind11/.gitignore
@@ -43,3 +43,4 @@ pybind11Targets.cmake
/pybind11/share/*
/docs/_build/*
.ipynb_checkpoints/
+tests/main.cpp
diff --git a/wrap/pybind11/.pre-commit-config.yaml b/wrap/pybind11/.pre-commit-config.yaml
index ba9955a31..86ac965d9 100644
--- a/wrap/pybind11/.pre-commit-config.yaml
+++ b/wrap/pybind11/.pre-commit-config.yaml
@@ -12,10 +12,62 @@
#
# See https://github.com/pre-commit/pre-commit
+
+ci:
+ autoupdate_commit_msg: "chore(deps): update pre-commit hooks"
+ autofix_commit_msg: "style: pre-commit fixes"
+ autoupdate_schedule: monthly
+
+# third-party content
+exclude: ^tools/JoinPaths.cmake$
+
repos:
+
+# Clang format the codebase automatically
+- repo: https://github.com/pre-commit/mirrors-clang-format
+ rev: "v16.0.6"
+ hooks:
+ - id: clang-format
+ types_or: [c++, c, cuda]
+
+# Black, the code formatter, natively supports pre-commit
+- repo: https://github.com/psf/black
+ rev: "23.3.0" # Keep in sync with blacken-docs
+ hooks:
+ - id: black
+
+# Ruff, the Python auto-correcting linter written in Rust
+- repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.0.276
+ hooks:
+ - id: ruff
+ args: ["--fix", "--show-fixes"]
+
+# Check static types with mypy
+- repo: https://github.com/pre-commit/mirrors-mypy
+ rev: "v1.4.1"
+ hooks:
+ - id: mypy
+ args: []
+ exclude: ^(tests|docs)/
+ additional_dependencies:
+ - markdown-it-py<3 # Drop this together with dropping Python 3.7 support.
+ - nox
+ - rich
+ - types-setuptools
+
+# CMake formatting
+- repo: https://github.com/cheshirekow/cmake-format-precommit
+ rev: "v0.6.13"
+ hooks:
+ - id: cmake-format
+ additional_dependencies: [pyyaml]
+ types: [file]
+ files: (\.cmake|CMakeLists.txt)(.in)?$
+
# Standard hooks
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: "v4.3.0"
+ rev: "v4.4.0"
hooks:
- id: check-added-large-files
- id: check-case-conflict
@@ -30,109 +82,38 @@ repos:
- id: requirements-txt-fixer
- id: trailing-whitespace
-# Upgrade old Python syntax
-- repo: https://github.com/asottile/pyupgrade
- rev: "v2.37.1"
- hooks:
- - id: pyupgrade
- args: [--py36-plus]
-
-# Nicely sort includes
-- repo: https://github.com/PyCQA/isort
- rev: "5.10.1"
- hooks:
- - id: isort
-
-# Black, the code formatter, natively supports pre-commit
-- repo: https://github.com/psf/black
- rev: "22.6.0" # Keep in sync with blacken-docs
- hooks:
- - id: black
-
# Also code format the docs
- repo: https://github.com/asottile/blacken-docs
- rev: "v1.12.1"
+ rev: "1.14.0"
hooks:
- id: blacken-docs
additional_dependencies:
- - black==22.6.0 # keep in sync with black hook
+ - black==23.3.0 # keep in sync with black hook
# Changes tabs to spaces
- repo: https://github.com/Lucas-C/pre-commit-hooks
- rev: "v1.3.0"
+ rev: "v1.5.1"
hooks:
- id: remove-tabs
+# Avoid directional quotes
- repo: https://github.com/sirosen/texthooks
- rev: "0.3.1"
+ rev: "0.5.0"
hooks:
- id: fix-ligatures
- id: fix-smartquotes
-# Autoremoves unused imports
-- repo: https://github.com/hadialqattan/pycln
- rev: "v2.0.1"
- hooks:
- - id: pycln
- stages: [manual]
-
# Checking for common mistakes
- repo: https://github.com/pre-commit/pygrep-hooks
- rev: "v1.9.0"
+ rev: "v1.10.0"
hooks:
- - id: python-check-blanket-noqa
- - id: python-check-blanket-type-ignore
- - id: python-no-log-warn
- - id: python-use-type-annotations
- id: rst-backticks
- id: rst-directive-colons
- id: rst-inline-touching-normal
-# Automatically remove noqa that are not used
-- repo: https://github.com/asottile/yesqa
- rev: "v1.3.0"
- hooks:
- - id: yesqa
- additional_dependencies: &flake8_dependencies
- - flake8-bugbear
- - pep8-naming
-
-# Flake8 also supports pre-commit natively (same author)
-- repo: https://github.com/PyCQA/flake8
- rev: "4.0.1"
- hooks:
- - id: flake8
- exclude: ^(docs/.*|tools/.*)$
- additional_dependencies: *flake8_dependencies
-
-# PyLint has native support - not always usable, but works for us
-- repo: https://github.com/PyCQA/pylint
- rev: "v2.14.4"
- hooks:
- - id: pylint
- files: ^pybind11
-
-# CMake formatting
-- repo: https://github.com/cheshirekow/cmake-format-precommit
- rev: "v0.6.13"
- hooks:
- - id: cmake-format
- additional_dependencies: [pyyaml]
- types: [file]
- files: (\.cmake|CMakeLists.txt)(.in)?$
-
-# Check static types with mypy
-- repo: https://github.com/pre-commit/mirrors-mypy
- rev: "v0.961"
- hooks:
- - id: mypy
- args: []
- exclude: ^(tests|docs)/
- additional_dependencies: [nox, rich]
-
# Checks the manifest for missing files (native support)
- repo: https://github.com/mgedmin/check-manifest
- rev: "0.48"
+ rev: "0.49"
hooks:
- id: check-manifest
# This is a slow hook, so only run this if --hook-stage manual is passed
@@ -140,16 +121,18 @@ repos:
additional_dependencies: [cmake, ninja]
# Check for spelling
+# Use tools/codespell_ignore_lines_from_errors.py
+# to rebuild .codespell-ignore-lines
- repo: https://github.com/codespell-project/codespell
- rev: "v2.1.0"
+ rev: "v2.2.5"
hooks:
- id: codespell
exclude: ".supp$"
- args: ["-L", "nd,ot,thist"]
+ args: ["-x.codespell-ignore-lines", "-Lccompiler"]
# Check for common shell mistakes
- repo: https://github.com/shellcheck-py/shellcheck-py
- rev: "v0.8.0.4"
+ rev: "v0.9.0.5"
hooks:
- id: shellcheck
@@ -162,9 +145,9 @@ repos:
entry: PyBind|Numpy|Cmake|CCache|PyTest
exclude: ^\.pre-commit-config.yaml$
-# Clang format the codebase automatically
-- repo: https://github.com/pre-commit/mirrors-clang-format
- rev: "v14.0.6"
+# PyLint has native support - not always usable, but works for us
+- repo: https://github.com/PyCQA/pylint
+ rev: "v3.0.0a6"
hooks:
- - id: clang-format
- types_or: [c++, c, cuda]
+ - id: pylint
+ files: ^pybind11
diff --git a/wrap/pybind11/CMakeLists.txt b/wrap/pybind11/CMakeLists.txt
index 3787982cb..87ec10346 100644
--- a/wrap/pybind11/CMakeLists.txt
+++ b/wrap/pybind11/CMakeLists.txt
@@ -5,15 +5,15 @@
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
-cmake_minimum_required(VERSION 3.4)
+cmake_minimum_required(VERSION 3.5)
-# The `cmake_minimum_required(VERSION 3.4...3.22)` syntax does not work with
+# The `cmake_minimum_required(VERSION 3.5...3.26)` syntax does not work with
# some versions of VS that have a patched CMake 3.11. This forces us to emulate
# the behavior using the following workaround:
-if(${CMAKE_VERSION} VERSION_LESS 3.22)
+if(${CMAKE_VERSION} VERSION_LESS 3.26)
cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
else()
- cmake_policy(VERSION 3.22)
+ cmake_policy(VERSION 3.26)
endif()
# Avoid infinite recursion if tests include this as a subdirectory
@@ -91,10 +91,16 @@ endif()
option(PYBIND11_INSTALL "Install pybind11 header files?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_TEST "Build pybind11 test suite?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_NOPYTHON "Disable search for Python" OFF)
+option(PYBIND11_SIMPLE_GIL_MANAGEMENT
+ "Use simpler GIL management logic that does not support disassociation" OFF)
set(PYBIND11_INTERNALS_VERSION
""
CACHE STRING "Override the ABI version, may be used to enable the unstable ABI.")
+if(PYBIND11_SIMPLE_GIL_MANAGEMENT)
+ add_compile_definitions(PYBIND11_SIMPLE_GIL_MANAGEMENT)
+endif()
+
cmake_dependent_option(
USE_PYTHON_INCLUDE_DIR
"Install pybind11 headers in Python include directory instead of default installation prefix"
@@ -120,6 +126,9 @@ set(PYBIND11_HEADERS
include/pybind11/complex.h
include/pybind11/options.h
include/pybind11/eigen.h
+ include/pybind11/eigen/common.h
+ include/pybind11/eigen/matrix.h
+ include/pybind11/eigen/tensor.h
include/pybind11/embed.h
include/pybind11/eval.h
include/pybind11/gil.h
@@ -131,7 +140,8 @@ set(PYBIND11_HEADERS
include/pybind11/pytypes.h
include/pybind11/stl.h
include/pybind11/stl_bind.h
- include/pybind11/stl/filesystem.h)
+ include/pybind11/stl/filesystem.h
+ include/pybind11/type_caster_pyobject_ptr.h)
# Compare with grep and warn if mismatched
if(PYBIND11_MASTER_PROJECT AND NOT CMAKE_VERSION VERSION_LESS 3.12)
@@ -198,6 +208,9 @@ else()
endif()
include("${CMAKE_CURRENT_SOURCE_DIR}/tools/pybind11Common.cmake")
+# https://github.com/jtojnar/cmake-snips/#concatenating-paths-when-building-pkg-config-files
+# TODO: cmake 3.20 adds the cmake_path() function, which obsoletes this snippet
+include("${CMAKE_CURRENT_SOURCE_DIR}/tools/JoinPaths.cmake")
# Relative directory setting
if(USE_PYTHON_INCLUDE_DIR AND DEFINED Python_INCLUDE_DIRS)
@@ -262,6 +275,16 @@ if(PYBIND11_INSTALL)
NAMESPACE "pybind11::"
DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
+ # pkg-config support
+ if(NOT prefix_for_pc_file)
+ set(prefix_for_pc_file "${CMAKE_INSTALL_PREFIX}")
+ endif()
+ join_paths(includedir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}")
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tools/pybind11.pc.in"
+ "${CMAKE_CURRENT_BINARY_DIR}/pybind11.pc" @ONLY)
+ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/pybind11.pc"
+ DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/pkgconfig/")
+
# Uninstall target
if(PYBIND11_MASTER_PROJECT)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tools/cmake_uninstall.cmake.in"
diff --git a/wrap/pybind11/MANIFEST.in b/wrap/pybind11/MANIFEST.in
index 033303a74..7ce83c552 100644
--- a/wrap/pybind11/MANIFEST.in
+++ b/wrap/pybind11/MANIFEST.in
@@ -1,5 +1,6 @@
+prune tests
recursive-include pybind11/include/pybind11 *.h
recursive-include pybind11 *.py
recursive-include pybind11 py.typed
include pybind11/share/cmake/pybind11/*.cmake
-include LICENSE README.rst pyproject.toml setup.py setup.cfg
+include LICENSE README.rst SECURITY.md pyproject.toml setup.py setup.cfg
diff --git a/wrap/pybind11/README.rst b/wrap/pybind11/README.rst
index 3c75edb57..80213a406 100644
--- a/wrap/pybind11/README.rst
+++ b/wrap/pybind11/README.rst
@@ -135,7 +135,7 @@ This project was created by `Wenzel
Jakob `_. Significant features and/or
improvements to the code were contributed by Jonas Adler, Lori A. Burns,
Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel
-Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov Johan Mabille, Tomasz Miąsko,
+Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko,
Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim
Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart.
diff --git a/wrap/pybind11/SECURITY.md b/wrap/pybind11/SECURITY.md
new file mode 100644
index 000000000..3d74611f2
--- /dev/null
+++ b/wrap/pybind11/SECURITY.md
@@ -0,0 +1,13 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/pybind/pybind11/security/advisories/new).
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.
diff --git a/wrap/pybind11/docs/advanced/cast/custom.rst b/wrap/pybind11/docs/advanced/cast/custom.rst
index 1df4d3e14..8138cac61 100644
--- a/wrap/pybind11/docs/advanced/cast/custom.rst
+++ b/wrap/pybind11/docs/advanced/cast/custom.rst
@@ -38,7 +38,7 @@ type is explicitly allowed.
.. code-block:: cpp
- namespace pybind11 { namespace detail {
+ namespace PYBIND11_NAMESPACE { namespace detail {
template <> struct type_caster {
public:
/**
@@ -78,7 +78,7 @@ type is explicitly allowed.
return PyLong_FromLong(src.long_value);
}
};
- }} // namespace pybind11::detail
+ }} // namespace PYBIND11_NAMESPACE::detail
.. note::
diff --git a/wrap/pybind11/docs/advanced/cast/stl.rst b/wrap/pybind11/docs/advanced/cast/stl.rst
index 109763f7a..03d49b295 100644
--- a/wrap/pybind11/docs/advanced/cast/stl.rst
+++ b/wrap/pybind11/docs/advanced/cast/stl.rst
@@ -42,7 +42,7 @@ types:
.. code-block:: cpp
// `boost::optional` as an example -- can be any `std::optional`-like container
- namespace pybind11 { namespace detail {
+ namespace PYBIND11_NAMESPACE { namespace detail {
template
struct type_caster> : optional_caster> {};
}}
@@ -54,7 +54,7 @@ for custom variant types:
.. code-block:: cpp
// `boost::variant` as an example -- can be any `std::variant`-like container
- namespace pybind11 { namespace detail {
+ namespace PYBIND11_NAMESPACE { namespace detail {
template
struct type_caster> : variant_caster> {};
@@ -66,7 +66,7 @@ for custom variant types:
return boost::apply_visitor(args...);
}
};
- }} // namespace pybind11::detail
+ }} // namespace PYBIND11_NAMESPACE::detail
The ``visit_helper`` specialization is not required if your ``name::variant`` provides
a ``name::visit()`` function. For any other function name, the specialization must be
diff --git a/wrap/pybind11/docs/advanced/cast/strings.rst b/wrap/pybind11/docs/advanced/cast/strings.rst
index e246c5219..271716b4b 100644
--- a/wrap/pybind11/docs/advanced/cast/strings.rst
+++ b/wrap/pybind11/docs/advanced/cast/strings.rst
@@ -101,8 +101,11 @@ conversion has the same overhead as implicit conversion.
m.def("str_output",
[]() {
std::string s = "Send your r\xe9sum\xe9 to Alice in HR"; // Latin-1
- py::str py_s = PyUnicode_DecodeLatin1(s.data(), s.length());
- return py_s;
+ py::handle py_s = PyUnicode_DecodeLatin1(s.data(), s.length(), nullptr);
+ if (!py_s) {
+ throw py::error_already_set();
+ }
+ return py::reinterpret_steal(py_s);
}
);
@@ -113,7 +116,8 @@ conversion has the same overhead as implicit conversion.
The `Python C API
`_ provides
-several built-in codecs.
+several built-in codecs. Note that these all return *new* references, so
+use :cpp:func:`reinterpret_steal` when converting them to a :cpp:class:`str`.
One could also use a third party encoding library such as libiconv to transcode
diff --git a/wrap/pybind11/docs/advanced/classes.rst b/wrap/pybind11/docs/advanced/classes.rst
index 49ddf5c0b..01a490b72 100644
--- a/wrap/pybind11/docs/advanced/classes.rst
+++ b/wrap/pybind11/docs/advanced/classes.rst
@@ -1228,7 +1228,7 @@ whether a downcast is safe, you can proceed by specializing the
std::string bark() const { return sound; }
};
- namespace pybind11 {
+ namespace PYBIND11_NAMESPACE {
template<> struct polymorphic_type_hook {
static const void *get(const Pet *src, const std::type_info*& type) {
// note that src may be nullptr
@@ -1239,7 +1239,7 @@ whether a downcast is safe, you can proceed by specializing the
return src;
}
};
- } // namespace pybind11
+ } // namespace PYBIND11_NAMESPACE
When pybind11 wants to convert a C++ pointer of type ``Base*`` to a
Python object, it calls ``polymorphic_type_hook::get()`` to
diff --git a/wrap/pybind11/docs/advanced/embedding.rst b/wrap/pybind11/docs/advanced/embedding.rst
index dd980d483..e6a1686f8 100644
--- a/wrap/pybind11/docs/advanced/embedding.rst
+++ b/wrap/pybind11/docs/advanced/embedding.rst
@@ -18,7 +18,7 @@ information, see :doc:`/compiling`.
.. code-block:: cmake
- cmake_minimum_required(VERSION 3.4)
+ cmake_minimum_required(VERSION 3.5...3.26)
project(example)
find_package(pybind11 REQUIRED) # or `add_subdirectory(pybind11)`
diff --git a/wrap/pybind11/docs/advanced/exceptions.rst b/wrap/pybind11/docs/advanced/exceptions.rst
index 2211caf5d..53981dc08 100644
--- a/wrap/pybind11/docs/advanced/exceptions.rst
+++ b/wrap/pybind11/docs/advanced/exceptions.rst
@@ -177,9 +177,12 @@ section.
may be explicitly (re-)thrown to delegate it to the other,
previously-declared existing exception translators.
- Note that ``libc++`` and ``libstdc++`` `behave differently `_
- with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI boundaries need to be explicitly exported, as exercised in ``tests/test_exceptions.h``.
- See also: "Problems with C++ exceptions" under `GCC Wiki `_.
+ Note that ``libc++`` and ``libstdc++`` `behave differently under macOS
+ `_
+ with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI
+ boundaries need to be explicitly exported, as exercised in
+ ``tests/test_exceptions.h``. See also:
+ "Problems with C++ exceptions" under `GCC Wiki `_.
Local vs Global Exception Translators
diff --git a/wrap/pybind11/docs/advanced/misc.rst b/wrap/pybind11/docs/advanced/misc.rst
index edab15fcb..805ec838f 100644
--- a/wrap/pybind11/docs/advanced/misc.rst
+++ b/wrap/pybind11/docs/advanced/misc.rst
@@ -39,15 +39,42 @@ The ``PYBIND11_MAKE_OPAQUE`` macro does *not* require the above workarounds.
Global Interpreter Lock (GIL)
=============================
-When calling a C++ function from Python, the GIL is always held.
+The Python C API dictates that the Global Interpreter Lock (GIL) must always
+be held by the current thread to safely access Python objects. As a result,
+when Python calls into C++ via pybind11 the GIL must be held, and pybind11
+will never implicitly release the GIL.
+
+.. code-block:: cpp
+
+ void my_function() {
+ /* GIL is held when this function is called from Python */
+ }
+
+ PYBIND11_MODULE(example, m) {
+ m.def("my_function", &my_function);
+ }
+
+pybind11 will ensure that the GIL is held when it knows that it is calling
+Python code. For example, if a Python callback is passed to C++ code via
+``std::function``, when C++ code calls the function the built-in wrapper
+will acquire the GIL before calling the Python callback. Similarly, the
+``PYBIND11_OVERRIDE`` family of macros will acquire the GIL before calling
+back into Python.
+
+When writing C++ code that is called from other C++ code, if that code accesses
+Python state, it must explicitly acquire and release the GIL.
+
The classes :class:`gil_scoped_release` and :class:`gil_scoped_acquire` can be
used to acquire and release the global interpreter lock in the body of a C++
function call. In this way, long-running C++ code can be parallelized using
-multiple Python threads. Taking :ref:`overriding_virtuals` as an example, this
+multiple Python threads, **but great care must be taken** when any
+:class:`gil_scoped_release` appear: if there is any way that the C++ code
+can access Python objects, :class:`gil_scoped_acquire` should be used to
+reacquire the GIL. Taking :ref:`overriding_virtuals` as an example, this
could be realized as follows (important changes highlighted):
.. code-block:: cpp
- :emphasize-lines: 8,9,31,32
+ :emphasize-lines: 8,30,31
class PyAnimal : public Animal {
public:
@@ -56,9 +83,7 @@ could be realized as follows (important changes highlighted):
/* Trampoline (need one for each virtual function) */
std::string go(int n_times) {
- /* Acquire GIL before calling Python code */
- py::gil_scoped_acquire acquire;
-
+ /* PYBIND11_OVERRIDE_PURE will acquire the GIL before accessing Python state */
PYBIND11_OVERRIDE_PURE(
std::string, /* Return type */
Animal, /* Parent class */
@@ -78,7 +103,8 @@ could be realized as follows (important changes highlighted):
.def(py::init<>());
m.def("call_go", [](Animal *animal) -> std::string {
- /* Release GIL before calling into (potentially long-running) C++ code */
+ // GIL is held when called from Python code. Release GIL before
+ // calling into (potentially long-running) C++ code
py::gil_scoped_release release;
return call_go(animal);
});
@@ -92,6 +118,34 @@ The ``call_go`` wrapper can also be simplified using the ``call_guard`` policy
m.def("call_go", &call_go, py::call_guard());
+Common Sources Of Global Interpreter Lock Errors
+==================================================================
+
+Failing to properly hold the Global Interpreter Lock (GIL) is one of the
+more common sources of bugs within code that uses pybind11. If you are
+running into GIL related errors, we highly recommend you consult the
+following checklist.
+
+- Do you have any global variables that are pybind11 objects or invoke
+ pybind11 functions in either their constructor or destructor? You are generally
+ not allowed to invoke any Python function in a global static context. We recommend
+ using lazy initialization and then intentionally leaking at the end of the program.
+
+- Do you have any pybind11 objects that are members of other C++ structures? One
+ commonly overlooked requirement is that pybind11 objects have to increase their reference count
+ whenever their copy constructor is called. Thus, you need to be holding the GIL to invoke
+ the copy constructor of any C++ class that has a pybind11 member. This can sometimes be very
+ tricky to track for complicated programs. Think carefully when you make a pybind11 object
+ a member in another struct.
+
+- C++ destructors that invoke Python functions can be particularly troublesome as
+ destructors can sometimes get invoked in weird and unexpected circumstances as a result
+ of exceptions.
+
+- You should try running your code in a debug build. That will enable additional assertions
+ within pybind11 that will throw exceptions on certain GIL handling errors
+ (reference counting operations).
+
Binding sequence data types, iterators, the slicing protocol, etc.
==================================================================
@@ -298,6 +352,15 @@ The class ``options`` allows you to selectively suppress auto-generated signatur
m.def("add", [](int a, int b) { return a + b; }, "A function which adds two numbers");
}
+pybind11 also appends all members of an enum to the resulting enum docstring.
+This default behavior can be disabled by using the ``disable_enum_members_docstring()``
+function of the ``options`` class.
+
+With ``disable_user_defined_docstrings()`` all user defined docstrings of
+``module_::def()``, ``class_::def()`` and ``enum_()`` are disabled, but the
+function signatures and enum members are included in the docstring, unless they
+are disabled separately.
+
Note that changes to the settings affect only function bindings created during the
lifetime of the ``options`` instance. When it goes out of scope at the end of the module's init function,
the default settings are restored to prevent unwanted side effects.
diff --git a/wrap/pybind11/docs/advanced/pycpp/numpy.rst b/wrap/pybind11/docs/advanced/pycpp/numpy.rst
index b6ef019ed..07c969305 100644
--- a/wrap/pybind11/docs/advanced/pycpp/numpy.rst
+++ b/wrap/pybind11/docs/advanced/pycpp/numpy.rst
@@ -433,7 +433,7 @@ following:
{ 2, 4 }, // shape (rows, cols)
{ sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes
);
- })
+ });
This approach is meant for providing a ``memoryview`` for a C/C++ buffer not
managed by Python. The user is responsible for managing the lifetime of the
@@ -449,7 +449,7 @@ We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer:
buffer, // buffer pointer
sizeof(uint8_t) * 8 // buffer size
);
- })
+ });
.. versionchanged:: 2.6
``memoryview::from_memory`` added.
diff --git a/wrap/pybind11/docs/advanced/smart_ptrs.rst b/wrap/pybind11/docs/advanced/smart_ptrs.rst
index 5a2220109..3c40ce123 100644
--- a/wrap/pybind11/docs/advanced/smart_ptrs.rst
+++ b/wrap/pybind11/docs/advanced/smart_ptrs.rst
@@ -157,7 +157,7 @@ specialized:
PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr);
// Only needed if the type's `.get()` goes by another name
- namespace pybind11 { namespace detail {
+ namespace PYBIND11_NAMESPACE { namespace detail {
template
struct holder_helper> { // <-- specialization
static const T *get(const SmartPtr