Merge branch 'develop' into fix/check-mex

release/4.3a0
Varun Agrawal 2020-11-18 16:00:32 -05:00
commit 1fe86fa49d
82 changed files with 1675 additions and 1164 deletions


@@ -43,11 +43,6 @@ if [ -z ${PYTHON_VERSION+x} ]; then
exit 127
fi
if [ -z ${WRAPPER+x} ]; then
echo "Please provide the wrapper to build!"
exit 126
fi
PYTHON="python${PYTHON_VERSION}"
if [[ $(uname) == "Darwin" ]]; then
@@ -61,25 +56,11 @@ PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
case $WRAPPER in
"cython")
BUILD_CYTHON="ON"
BUILD_PYBIND="OFF"
TYPEDEF_POINTS_TO_VECTORS="OFF"
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/cython/requirements.txt
BUILD_PYBIND="ON"
;;
TYPEDEF_POINTS_TO_VECTORS="ON"
"pybind")
BUILD_CYTHON="OFF"
BUILD_PYBIND="ON"
TYPEDEF_POINTS_TO_VECTORS="ON"
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
;;
*)
exit 126
;;
esac
mkdir $GITHUB_WORKSPACE/build
cd $GITHUB_WORKSPACE/build
@@ -90,7 +71,6 @@ cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=${BUILD_CYTHON} \
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
-DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
@@ -98,30 +78,10 @@ cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
make -j$(nproc) install &
make -j$(nproc) install
while ps -p $! > /dev/null
do
sleep 60
now=$(date +%s)
printf "%d seconds have elapsed\n" $(( (now - start) ))
done
case $WRAPPER in
cd $GITHUB_WORKSPACE/build/python
"cython")
$PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/build/cython
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON setup.py install --user --prefix=
$PYTHON -m unittest discover
cd $GITHUB_WORKSPACE/build/cython/gtsam/tests
$PYTHON -m unittest discover
;;
"pybind")
cd $GITHUB_WORKSPACE/build/python
$PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover
;;
*)
echo "THIS SHOULD NEVER HAPPEN!"
exit 125
;;
esac
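With the cython path gone, this script now always builds the pybind11 wrapper and runs its unit tests. Assuming this is the python.sh script invoked by the workflow further below, a local run would look roughly like the following sketch (the exported values are illustrative; GITHUB_WORKSPACE is normally provided by the CI runner):

```sh
# Hypothetical local invocation; PYTHON_VERSION is required (the script exits with 127 without it).
export GITHUB_WORKSPACE=$PWD      # repository root, normally set by GitHub Actions
export PYTHON_VERSION=3
export GTSAM_WITH_TBB=OFF         # optional, defaults to OFF
bash .github/scripts/python.sh
```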


@@ -66,6 +66,8 @@ function configure()
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \
-DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DBOOST_ROOT=$BOOST_ROOT \
-DBoost_NO_SYSTEM_PATHS=ON \
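The two new flags are passed through the usual `${VAR:-default}` fallback, so they resolve to ON unless a job explicitly exports them (as the new cayley job added later in this commit does). A minimal illustration of that shell expansion, with made-up values:

```sh
# ${VAR:-ON} uses ON when VAR is unset or empty, otherwise the exported value.
unset GTSAM_ROT3_EXPMAP
echo "-DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON}"   # prints -DGTSAM_ROT3_EXPMAP=ON
export GTSAM_ROT3_EXPMAP=OFF
echo "-DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON}"   # prints -DGTSAM_ROT3_EXPMAP=OFF
```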


@@ -48,25 +48,32 @@ jobs:
- name: Install (Linux)
if: runner.os == 'Linux'
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
# LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
# (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool
# ipv4 avoids potential timeouts because of crappy IPv6 infrastructure
# 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
# This key is not in the keystore by default for Ubuntu so we need to add it.
LLVM_KEY=15CF4D18AF4F7421
gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY
gpg -a --export $LLVM_KEY | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt install cmake build-essential pkg-config libpython-dev python-numpy
echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)"
echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV
echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)"
echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::g++-${{ matrix.version }}"
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV
fi
- name: Check Boost version
if: runner.os == 'Linux'
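The recurring change in these workflow files is the move from the deprecated `::set-env` workflow command to writing `KEY=value` lines into the file named by `$GITHUB_ENV`; anything written there becomes an environment variable for all later steps of the same job. A minimal sketch of the pattern (values are illustrative):

```sh
# Inside a run: step. Later steps in this job will see CC and CXX in their environment.
echo "CC=gcc-9" >> "$GITHUB_ENV"
echo "CXX=g++-9" >> "$GITHUB_ENV"
```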


@@ -35,17 +35,19 @@ jobs:
- name: Install (macOS)
if: runner.os == 'macOS'
run: |
brew install cmake ninja boost
brew tap ProfFan/robotics
brew install cmake ninja
brew install ProfFan/robotics/boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::g++-${{ matrix.version }}"
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "CC=clang" >> $GITHUB_ENV
echo "::set-env name=CXX::clang++"
echo "CXX=clang++" >> $GITHUB_ENV
fi
- name: Build and Test (macOS)
if: runner.os == 'macOS'
run: |
bash .github/scripts/unix.sh -t


@@ -12,7 +12,6 @@ jobs:
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
PYTHON_VERSION: ${{ matrix.python_version }}
WRAPPER: ${{ matrix.wrapper }}
strategy:
fail-fast: false
matrix:
@@ -20,7 +19,7 @@ jobs:
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
# ubuntu-18.04-gcc-9, # TODO Disabled for now because of timeouts
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
macOS-10.15-xcode-11.3.1,
ubuntu-18.04-gcc-5-tbb,
@@ -28,18 +27,16 @@
build_type: [Debug, Release]
python_version: [3]
wrapper: [pybind]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
compiler: gcc
version: "5"
# TODO Disabled for now because of timeouts
- name: ubuntu-18.04-gcc-9
# - name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
# os: ubuntu-18.04
compiler: gcc
# compiler: gcc
version: "9"
# version: "9"
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
@@ -63,8 +60,14 @@ jobs:
- name: Install (Linux)
if: runner.os == 'Linux'
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
# (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool
# ipv4 avoids potential timeouts because of crappy IPv6 infrastructure
# 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
# This key is not in the keystore by default for Ubuntu so we need to add it.
LLVM_KEY=15CF4D18AF4F7421
gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY
gpg -a --export $LLVM_KEY | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
@@ -73,30 +76,32 @@ jobs:
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::g++-${{ matrix.version }}"
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV
fi
- name: Install (macOS)
if: runner.os == 'macOS'
run: |
brew install cmake ninja boost
brew tap ProfFan/robotics
brew install cmake ninja
brew install ProfFan/robotics/boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::g++-${{ matrix.version }}"
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "CC=clang" >> $GITHUB_ENV
echo "::set-env name=CXX::clang++"
echo "CXX=clang++" >> $GITHUB_ENV
fi
- name: Set GTSAM_WITH_TBB Flag
if: matrix.flag == 'tbb'
run: |
echo "::set-env name=GTSAM_WITH_TBB::ON"
echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV
echo "GTSAM Uses TBB"
- name: Build (Linux)
if: runner.os == 'Linux'
@@ -105,4 +110,4 @@ jobs:
- name: Build (macOS)
if: runner.os == 'macOS'
run: |
bash .github/scripts/python.sh


@@ -24,6 +24,7 @@ jobs:
ubuntu-gcc-deprecated,
ubuntu-gcc-quaternions,
ubuntu-gcc-tbb,
ubuntu-cayleymap,
]
build_type: [Debug, Release]
@@ -47,6 +48,12 @@ jobs:
version: "9"
flag: tbb
- name: ubuntu-cayleymap
os: ubuntu-18.04
compiler: gcc
version: "9"
flag: cayley
steps:
- name: Checkout
uses: actions/checkout@master
@@ -56,23 +63,25 @@ jobs:
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421
gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt install cmake build-essential pkg-config libpython-dev python-numpy
echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)"
echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV
echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)"
echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::g++-${{ matrix.version }}"
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV
fi
- name: Install (macOS)
@@ -81,32 +90,39 @@ jobs:
brew install cmake ninja boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "::set-env name=CXX::g++-${{ matrix.version }}"
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "CC=clang" >> $GITHUB_ENV
echo "::set-env name=CXX::clang++"
echo "CXX=clang++" >> $GITHUB_ENV
fi
- name: Set Allow Deprecated Flag
if: matrix.flag == 'deprecated'
run: |
echo "::set-env name=GTSAM_ALLOW_DEPRECATED_SINCE_V41::ON"
echo "GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON" >> $GITHUB_ENV
echo "Allow deprecated since version 4.1"
- name: Set Use Quaternions Flag
if: matrix.flag == 'quaternions'
run: |
echo "::set-env name=GTSAM_USE_QUATERNIONS::ON"
echo "GTSAM_USE_QUATERNIONS=ON" >> $GITHUB_ENV
echo "Use Quaternions for rotations"
- name: Set GTSAM_WITH_TBB Flag
if: matrix.flag == 'tbb'
run: |
echo "::set-env name=GTSAM_WITH_TBB::ON"
echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV
echo "GTSAM Uses TBB"
- name: Use Cayley Transform for Rot3
if: matrix.flag == 'cayley'
run: |
echo "GTSAM_POSE3_EXPMAP=OFF" >> $GITHUB_ENV
echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV
echo "GTSAM Uses Cayley map for Rot3"
- name: Build & Test
run: |
bash .github/scripts/unix.sh -t
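The new cayleymap job differs from the default configuration only in the two variables it exports before the shared build script runs; `configure()` in unix.sh then forwards them to CMake through the `${VAR:-ON}` defaults shown earlier. Reproducing that configuration locally would look roughly like this sketch (run from the repository root; `-t` is the flag the Build & Test steps above pass to also run the tests):

```sh
# Hypothetical local equivalent of the ubuntu-cayleymap CI job.
export GTSAM_POSE3_EXPMAP=OFF
export GTSAM_ROT3_EXPMAP=OFF
bash .github/scripts/unix.sh -t
```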


@@ -18,16 +18,19 @@ jobs:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
windows-2016-cl,
#TODO This build keeps timing out, need to understand why.
# windows-2016-cl,
windows-2019-cl,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: windows-2016-cl
os: windows-2016
#TODO This build keeps timing out, need to understand why.
compiler: cl
# - name: windows-2016-cl
# os: windows-2016
# compiler: cl
- name: windows-2019-cl
os: windows-2019
@@ -50,17 +53,17 @@ jobs:
# See: https://github.com/DaanDeMeyer/doctest/runs/231595515
# See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413
scoop install gcc --global
echo "::set-env name=CC::gcc"
echo "CC=gcc" >> $GITHUB_ENV
echo "::set-env name=CXX::g++"
echo "CXX=g++" >> $GITHUB_ENV
} elseif ("${{ matrix.compiler }}" -eq "clang") {
echo "::set-env name=CC::clang"
echo "CC=clang" >> $GITHUB_ENV
echo "::set-env name=CXX::clang++"
echo "CXX=clang++" >> $GITHUB_ENV
} else {
echo "::set-env name=CC::${{ matrix.compiler }}"
echo "CC=${{ matrix.compiler }}" >> $GITHUB_ENV
echo "::set-env name=CXX::${{ matrix.compiler }}"
echo "CXX=${{ matrix.compiler }}" >> $GITHUB_ENV
}
# Scoop modifies the PATH so we make the modified PATH global.
echo "::set-env name=PATH::$env:PATH"
echo "$env:PATH" >> $GITHUB_PATH
- name: Build (Windows)
if: runner.os == 'Windows'
run: |


@@ -1,11 +1,11 @@
# This triggers Cython builds on `gtsam-manylinux-build`
# This triggers Python builds on `gtsam-manylinux-build`
name: Trigger Python Builds
on:
push:
branches:
- develop
jobs:
triggerCython:
triggerPython:
runs-on: ubuntu-latest
steps:
- name: Repository Dispatch
@@ -13,5 +13,5 @@ jobs:
with:
token: ${{ secrets.PYTHON_CI_REPO_ACCESS_TOKEN }}
repository: borglab/gtsam-manylinux-build
event-type: cython-wrapper
event-type: python-wrapper
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
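The Repository Dispatch action used here sends a `repository_dispatch` event to the target repository through the GitHub REST API. For reference, a manual trigger equivalent to this step would look roughly like the sketch below (token and SHA values are placeholders):

```sh
# Hypothetical manual equivalent of the repository-dispatch step above.
curl -X POST \
  -H "Authorization: token $PYTHON_CI_REPO_ACCESS_TOKEN" \
  -H "Accept: application/vnd.github.v3+json" \
  https://api.github.com/repos/borglab/gtsam-manylinux-build/dispatches \
  -d '{"event_type": "python-wrapper", "client_payload": {"ref": "refs/heads/develop", "sha": "<commit-sha>"}}'
```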

.gitignore

@@ -9,12 +9,6 @@
*.txt.user
*.txt.user.6d59f0c
*.pydevproject
cython/venv
cython/gtsam.cpp
cython/gtsam.cpython-35m-darwin.so
cython/gtsam.pyx
cython/gtsam.so
cython/gtsam_wrapper.pxd
.vscode
.env
/.vs/


@@ -22,17 +22,10 @@ set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})
###############################################################################
# Gather information, perform checks, set defaults
# Set the default install path to home
#set (CMAKE_INSTALL_PREFIX ${HOME} CACHE PATH "Install prefix for library")
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
include(GtsamMakeConfigFile)
include(GNUInstallDirs)
# Record the root dir for gtsam - needed during external builds, e.g., ROS
set(GTSAM_SOURCE_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]")
# Load build type flags and default to Debug mode
include(GtsamBuildTypes)
@@ -45,406 +38,21 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
endif()
# See whether gtsam_unstable is available (it will be present only if we're using a git checkout)
include(cmake/HandleBoost.cmake) # Boost
if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable")
include(cmake/HandleCCache.cmake) # ccache
set(GTSAM_UNSTABLE_AVAILABLE 1)
include(cmake/HandleCPack.cmake) # CPack
else()
include(cmake/HandleEigen.cmake) # Eigen3
set(GTSAM_UNSTABLE_AVAILABLE 0)
include(cmake/HandleGeneralOptions.cmake) # CMake build options
endif()
include(cmake/HandleMKL.cmake) # MKL
include(cmake/HandleOpenMP.cmake) # OpenMP
# ----------------------------------------------------------------------------
include(cmake/HandlePerfTools.cmake) # Google perftools
# Uninstall target, for "make uninstall"
include(cmake/HandlePython.cmake) # Python options and commands
# ----------------------------------------------------------------------------
include(cmake/HandleTBB.cmake) # TBB
configure_file(
include(cmake/HandleUninstall.cmake) # for "make uninstall"
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" include(cmake/HandleAllocators.cmake) # Must be after tbb, pertools
IMMEDIATE @ONLY)
include(cmake/HandleGlobalBuildFlags.cmake) # Build flags
add_custom_target(uninstall
"${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake")
###############################################################################
# Set up options
# Configurable Options
if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
# Set the build type to upper case for downstream use
string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER)
# Set the GTSAM_BUILD_TAG variable.
# If build type is Release, set to blank (""), else set to the build type.
if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE")
set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory
else()
set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}")
endif()
endif()
# Check / set dependent variables for MATLAB wrapper
if(GTSAM_INSTALL_MATLAB_TOOLBOX)
find_package(Matlab COMPONENTS MEX_COMPILER REQUIRED)
if(NOT Matlab_MEX_COMPILER)
message(FATAL_ERROR "Cannot find MEX compiler binary. Please check your Matlab installation and ensure MEX in installed as well.")
endif()
if(GTSAM_BUILD_TYPE_POSTFIXES)
set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX})
endif()
if(NOT BUILD_SHARED_LIBS)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.")
endif()
endif()
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.")
if(GTSAM_BUILD_PYTHON)
# Get info about the Python3 interpreter
# https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3
find_package(Python3 COMPONENTS Interpreter Development)
if(NOT ${Python3_FOUND})
message(FATAL_ERROR "Cannot find Python3 interpreter. Please install Python >= 3.6.")
endif()
if(${GTSAM_PYTHON_VERSION} STREQUAL "Default")
set(GTSAM_PYTHON_VERSION "${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}"
CACHE
STRING
"The version of Python to build the wrappers against."
FORCE)
endif()
if(GTSAM_UNSTABLE_BUILD_PYTHON)
if (NOT GTSAM_BUILD_UNSTABLE)
message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.")
set(GTSAM_UNSTABLE_BUILD_PYTHON OFF)
endif()
endif()
set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python")
endif()
# Flags for choosing default packaging tools
set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator")
set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
if (CMAKE_GENERATOR STREQUAL "Ninja" AND
((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR
(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5)))
# Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support.
# Rationale in https://github.com/ninja-build/ninja/issues/814
add_compile_options(-fdiagnostics-color=always)
endif()
###############################################################################
# Find boost
# To change the path for boost, you will need to set:
# BOOST_ROOT: path to install prefix for boost
# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT
if(MSVC)
# By default, boost only builds static libraries on windows
set(Boost_USE_STATIC_LIBS ON) # only find static libs
# If we ever reset above on windows and, ...
# If we use Boost shared libs, disable auto linking.
# Some libraries, at least Boost Program Options, rely on this to export DLL symbols.
if(NOT Boost_USE_STATIC_LIBS)
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK)
endif()
# Virtual memory range for PCH exceeded on VS2015
if(MSVC_VERSION LESS 1910) # older than VS2017
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295)
endif()
endif()
# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT()
# or explicit instantiation will generate build errors.
# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
#
if(MSVC AND BUILD_SHARED_LIBS)
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
endif()
# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
set(BOOST_FIND_MINIMUM_VERSION 1.58)
set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})
# Required components
if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.")
endif()
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
set(GTSAM_BOOST_LIBRARIES
Boost::serialization
Boost::system
Boost::filesystem
Boost::thread
Boost::date_time
Boost::regex
)
if (GTSAM_DISABLE_NEW_TIMERS)
message("WARNING: GTSAM timing instrumentation manually disabled")
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS)
else()
if(Boost_TIMER_LIBRARY)
list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
else()
list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")
endif()
endif()
###############################################################################
# Find TBB
find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
# Set up variables if we're using TBB
if(TBB_FOUND AND GTSAM_WITH_TBB)
set(GTSAM_USE_TBB 1) # This will go into config.h
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1)
else()
set(TBB_GREATER_EQUAL_2020 0)
endif()
# all definitions and link requisites will go via imported targets:
# tbb & tbbmalloc
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
else()
set(GTSAM_USE_TBB 0) # This will go into config.h
endif()
###############################################################################
# Prohibit Timing build mode in combination with TBB
if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
endif()
###############################################################################
# Find Google perftools
find_package(GooglePerfTools)
###############################################################################
# Support ccache, if installed
if(NOT MSVC AND NOT XCODE_VERSION)
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
if(GTSAM_BUILD_WITH_CCACHE)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
else()
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "")
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "")
endif()
endif(CCACHE_FOUND)
endif()
###############################################################################
# Find MKL
find_package(MKL)
if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL)
set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h
set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL
list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES})
# --no-as-needed is required with gcc according to the MKL link advisor
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed")
endif()
else()
set(GTSAM_USE_EIGEN_MKL 0)
set(EIGEN_USE_MKL_ALL 0)
endif()
###############################################################################
# Find OpenMP (if we're also using MKL)
find_package(OpenMP) # do this here to generate correct message if disabled
if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL)
if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP)
set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS})
endif()
endif()
###############################################################################
# Option for using system Eigen or GTSAM-bundled Eigen
### These patches only affect usage of MKL. If you want to enable MKL, you *must*
### use our patched version of Eigen
### See: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection)
### http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code)
option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF)
option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF)
# Switch for using system Eigen or GTSAM-bundled Eigen
if(GTSAM_USE_SYSTEM_EIGEN)
find_package(Eigen3 REQUIRED)
# Use generic Eigen include paths e.g. <Eigen/Core>
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
# check if MKL is also enabled - can have one or the other, but not both!
# Note: Eigen >= v3.2.5 includes our patches
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5))
message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL")
endif()
# Check for Eigen version which doesn't work with MKL
# See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details.
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4))
message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
endif()
# The actual include directory (for BUILD cmake target interface):
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
else()
# Use bundled Eigen include path.
# Clear any variables set by FindEigen3
if(EIGEN3_INCLUDE_DIR)
set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE)
endif()
# set full path to be used by external projects
# this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")
# The actual include directory (for BUILD cmake target interface):
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
endif()
# Detect Eigen version:
set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h")
if (EXISTS ${EIGEN_VER_H})
file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION)
# Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc...
string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}")
string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}")
string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}")
string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}")
string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}")
string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}")
set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}")
message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}")
else()
message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`")
endif ()
if (MSVC)
if (BUILD_SHARED_LIBS)
# mute eigen static assert to avoid errors in shared lib
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
endif()
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen
endif()
if (APPLE AND BUILD_SHARED_LIBS)
# Set the default install directory on macOS
set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib")
endif()
###############################################################################
# Global compile options
# Build list of possible allocators
set(possible_allocators "")
if(GTSAM_USE_TBB)
list(APPEND possible_allocators TBB)
set(preferred_allocator TBB)
else()
list(APPEND possible_allocators BoostPool STL)
set(preferred_allocator STL)
endif()
if(GOOGLE_PERFTOOLS_FOUND)
list(APPEND possible_allocators tcmalloc)
endif()
# Check if current allocator choice is valid and set cache option
list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid)
if(allocator_valid EQUAL -1)
set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE)
else()
set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator")
endif()
set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators})
mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR)
# Define compile flags depending on allocator
if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool")
set(GTSAM_ALLOCATOR_BOOSTPOOL 1)
elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL")
set(GTSAM_ALLOCATOR_STL 1)
elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB")
set(GTSAM_ALLOCATOR_TBB 1)
elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc")
set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator
list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc")
endif()
if(MSVC)
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS)
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code
endif()
# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc.
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
endif()
endif()
# As of XCode 7, clang also complains about this
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
endif()
endif()
if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
# This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS)
endif()
###############################################################################
# Add components
@@ -484,7 +92,6 @@ endif()
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)
# Check for doxygen availability - optional dependency
find_package(Doxygen)
@@ -496,146 +103,11 @@ endif()
# CMake Tools
add_subdirectory(cmake)
###############################################################################
# Set up CPack
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM")
set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology")
set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu")
set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH})
set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory
#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error
set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$")
set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/")
set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs
# Deb-package specific cpack
set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev")
set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
###############################################################################
# Print configuration variables
message(STATUS "===============================================================")
include(cmake/HandlePrintConfiguration.cmake)
message(STATUS "================ Configuration Options ======================")
print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}")
print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}")
print_config("CMake version" "${CMAKE_VERSION}")
print_config("CMake generator" "${CMAKE_GENERATOR}")
print_config("CMake build tool" "${CMAKE_BUILD_TOOL}")
message(STATUS "Build flags ")
print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests")
print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'")
print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
if (DOXYGEN_FOUND)
print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs")
endif()
print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries")
print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name")
if(GTSAM_UNSTABLE_AVAILABLE)
print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ")
print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
print_config("Build type" "${CMAKE_BUILD_TYPE}")
print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
endif()
print_build_options_for_target(gtsam)
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
if(GTSAM_USE_TBB)
print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")
elseif(TBB_FOUND)
print_config("Use Intel TBB" "TBB (Version: ${TBB_VERSION}) found but GTSAM_WITH_TBB is disabled")
else()
print_config("Use Intel TBB" "TBB not found")
endif()
if(GTSAM_USE_EIGEN_MKL)
print_config("Eigen will use MKL" "Yes")
elseif(MKL_FOUND)
print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
else()
print_config("Eigen will use MKL" "MKL not found")
endif()
if(GTSAM_USE_EIGEN_MKL_OPENMP)
print_config("Eigen will use MKL and OpenMP" "Yes")
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found")
elseif(OPENMP_FOUND)
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
else()
print_config("Eigen will use MKL and OpenMP" "OpenMP not found")
endif()
print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}")
if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
print_config("Cheirality exceptions enabled" "YES")
else()
print_config("Cheirality exceptions enabled" "NO")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
print_config("Build with ccache" "Yes")
elseif(CCACHE_FOUND)
print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
else()
print_config("Build with ccache" "No")
endif()
endif()
message(STATUS "Packaging flags")
print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}")
print_config("CPack Generator" "${CPACK_GENERATOR}")
message(STATUS "GTSAM flags ")
print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
message(STATUS "MATLAB toolbox flags")
print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
print_config("MATLAB root" "${MATLAB_ROOT}")
print_config("MEX binary" "${MEX_COMMAND}")
endif()
message(STATUS "Python toolbox flags ")
print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ")
if(GTSAM_BUILD_PYTHON)
print_config("Python version" ${GTSAM_PYTHON_VERSION})
endif()
message(STATUS "===============================================================")
# Print warnings at the end
if(GTSAM_WITH_TBB AND NOT TBB_FOUND)
include(cmake/HandleFinalChecks.cmake)
message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
endif()
if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND)
message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
endif()
if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
endif()
# Include CPack *after* all flags
include(CPack)


@@ -173,7 +173,7 @@ NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against g
Intel has a guide for installing MKL on Linux through APT repositories at <https://software.intel.com/en-us/articles/installing-intel-free-libs-and-python-apt-repo>.
After following the instructions, add the following to your `~/.bashrc` (and afterwards, open a new terminal before compiling GTSAM):
`LD_PRELOAD` need only be set if you are building the cython wrapper to use GTSAM from python.
`LD_PRELOAD` need only be set if you are building the python wrapper to use GTSAM from python.
```sh
source /opt/intel/mkl/bin/mklvars.sh intel64
export LD_PRELOAD="$LD_PRELOAD:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so"
@@ -190,6 +190,6 @@ Failing to specify `LD_PRELOAD` may lead to errors such as:
`ImportError: /opt/intel/mkl/lib/intel64/libmkl_vml_avx2.so: undefined symbol: mkl_serv_getenv`
or
`Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.`
when importing GTSAM using the cython wrapper in python.
when importing GTSAM using the python wrapper.


@@ -19,7 +19,6 @@ install(FILES
GtsamMatlabWrap.cmake
GtsamTesting.cmake
GtsamPrinting.cmake
FindCython.cmake
FindNumPy.cmake
README.html
DESTINATION "${SCRIPT_INSTALL_DIR}/GTSAMCMakeTools")


@@ -1,81 +0,0 @@
# Modifed from: https://github.com/nest/nest-simulator/blob/master/cmake/FindCython.cmake
#
# Find the Cython compiler.
#
# This code sets the following variables:
#
# CYTHON_FOUND
# CYTHON_PATH
# CYTHON_EXECUTABLE
# CYTHON_VERSION
#
# See also UseCython.cmake
#=============================================================================
# Copyright 2011 Kitware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# Use the Cython executable that lives next to the Python executable
# if it is a local installation.
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
find_package(PythonInterp)
else()
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
endif()
if ( PYTHONINTERP_FOUND )
execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
"import Cython; print(Cython.__path__[0])"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE CYTHON_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
endif ()
# RESULT=0 means ok
if ( NOT RESULT )
get_filename_component( _python_path ${PYTHON_EXECUTABLE} PATH )
find_program( CYTHON_EXECUTABLE
NAMES cython cython.bat cython3
HINTS ${_python_path}
)
endif ()
# RESULT=0 means ok
if ( NOT RESULT )
execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
"import Cython; print(Cython.__version__)"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE CYTHON_VAR_OUTPUT
ERROR_VARIABLE CYTHON_VAR_OUTPUT
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if ( RESULT EQUAL 0 )
string( REGEX REPLACE ".* ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1"
CYTHON_VERSION "${CYTHON_VAR_OUTPUT}" )
endif ()
endif ()
include( FindPackageHandleStandardArgs )
find_package_handle_standard_args( Cython
FOUND_VAR
CYTHON_FOUND
REQUIRED_VARS
CYTHON_PATH
CYTHON_EXECUTABLE
VERSION_VAR
CYTHON_VERSION
)


@@ -1,3 +1,5 @@
include(CheckCXXCompilerFlag) # for check_cxx_compiler_flag()
# Set cmake policy to recognize the AppleClang compiler
# independently from the Clang compiler.
if(POLICY CMP0025)
@@ -105,11 +107,14 @@ if(MSVC)
else()
# Common to all configurations, next for each configuration:
if (
if (NOT MSVC)
((CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0.0)) OR
check_cxx_compiler_flag(-Wsuggest-override COMPILER_HAS_WSUGGEST_OVERRIDE)
(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
check_cxx_compiler_flag(-Wmissing COMPILER_HAS_WMISSING_OVERRIDE)
)
if (COMPILER_HAS_WSUGGEST_OVERRIDE)
set(flag_override_ -Wsuggest-override) # -Werror=suggest-override: Add again someday
elseif(COMPILER_HAS_WMISSING_OVERRIDE)
set(flag_override_ -Wmissing-override) # -Werror=missing-override: Add again someday
endif()
endif()
set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON
@@ -263,3 +268,17 @@ function(gtsam_apply_build_flags target_name_)
target_compile_options(${target_name_} PRIVATE ${GTSAM_COMPILE_OPTIONS_PRIVATE})
endfunction(gtsam_apply_build_flags)
if(NOT MSVC AND NOT XCODE_VERSION)
# Set the build type to upper case for downstream use
string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER)
# Set the GTSAM_BUILD_TAG variable.
# If build type is Release, set to blank (""), else set to the build type.
if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE")
set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory
else()
set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}")
endif()
endif()


@@ -0,0 +1,34 @@
# Build list of possible allocators
set(possible_allocators "")
if(GTSAM_USE_TBB)
list(APPEND possible_allocators TBB)
set(preferred_allocator TBB)
else()
list(APPEND possible_allocators BoostPool STL)
set(preferred_allocator STL)
endif()
if(GOOGLE_PERFTOOLS_FOUND)
list(APPEND possible_allocators tcmalloc)
endif()
# Check if current allocator choice is valid and set cache option
list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid)
if(allocator_valid EQUAL -1)
set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE)
else()
set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator")
endif()
set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators})
mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR)
# Define compile flags depending on allocator
if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool")
set(GTSAM_ALLOCATOR_BOOSTPOOL 1)
elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL")
set(GTSAM_ALLOCATOR_STL 1)
elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB")
set(GTSAM_ALLOCATOR_TBB 1)
elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc")
set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator
list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc")
endif()

cmake/HandleBoost.cmake (new file)

@@ -0,0 +1,56 @@
###############################################################################
# Find boost
# To change the path for boost, you will need to set:
# BOOST_ROOT: path to install prefix for boost
# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT
if(MSVC)
# By default, boost only builds static libraries on windows
set(Boost_USE_STATIC_LIBS ON) # only find static libs
# If we ever reset above on windows and, ...
# If we use Boost shared libs, disable auto linking.
# Some libraries, at least Boost Program Options, rely on this to export DLL symbols.
if(NOT Boost_USE_STATIC_LIBS)
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK)
endif()
# Virtual memory range for PCH exceeded on VS2015
if(MSVC_VERSION LESS 1910) # older than VS2017
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295)
endif()
endif()
# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
set(BOOST_FIND_MINIMUM_VERSION 1.58)
set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})
# Required components
if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.")
endif()
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
set(GTSAM_BOOST_LIBRARIES
Boost::serialization
Boost::system
Boost::filesystem
Boost::thread
Boost::date_time
Boost::regex
)
if (GTSAM_DISABLE_NEW_TIMERS)
message("WARNING: GTSAM timing instrumentation manually disabled")
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC GTSAM_DISABLE_NEW_TIMERS)
else()
if(Boost_TIMER_LIBRARY)
list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
else()
list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
message("WARNING: GTSAM timing instrumentation will use the older, less accurate Boost timer library because a Boost version older than 1.48 was found.")
endif()
endif()
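GTSAM_DISABLE_NEW_TIMERS and the optional Boost::timer/Boost::chrono components amount to a compile-time choice of timing backend. The sketch below only illustrates branching on that definition; wall_time_ns() is a hypothetical helper, not GTSAM's timing API.

// Illustrative sketch: selecting a timing backend based on GTSAM_DISABLE_NEW_TIMERS.
// wall_time_ns() is a hypothetical helper for this example only.
#include <cstdint>

#ifdef GTSAM_DISABLE_NEW_TIMERS
  #include <chrono>
  inline std::int64_t wall_time_ns() {
    // Fallback that needs no Boost timing libraries at all.
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
               std::chrono::steady_clock::now().time_since_epoch()).count();
  }
#else
  #include <boost/timer/timer.hpp>
  inline std::int64_t wall_time_ns() {
    static boost::timer::cpu_timer timer;  // starts running on construction
    return timer.elapsed().wall;           // wall-clock nanoseconds since construction
  }
#endif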

cmake/HandleCCache.cmake Normal file

@ -0,0 +1,14 @@
###############################################################################
# Support ccache, if installed
if(NOT MSVC AND NOT XCODE_VERSION)
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
if(GTSAM_BUILD_WITH_CCACHE)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
else()
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "")
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "")
endif()
endif(CCACHE_FOUND)
endif()

cmake/HandleCPack.cmake Normal file

@ -0,0 +1,28 @@
#JLBC: is all this actually used by someone? could it be removed?
# Flags for choosing default packaging tools
set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator")
set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
###############################################################################
# Set up CPack
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM")
set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology")
set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu")
set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH})
set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory
#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error
set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$")
set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/")
set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs
# Deb-package specific cpack
set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev")
set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")

cmake/HandleEigen.cmake Normal file

@ -0,0 +1,77 @@
###############################################################################
# Option for using system Eigen or GTSAM-bundled Eigen
option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF)
if(NOT GTSAM_USE_SYSTEM_EIGEN)
# This option only makes sense if using the embedded copy of Eigen, it is
# used to decide whether to *install* the "unsupported" module:
option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF)
endif()
# Switch for using system Eigen or GTSAM-bundled Eigen
if(GTSAM_USE_SYSTEM_EIGEN)
find_package(Eigen3 REQUIRED)
# Use generic Eigen include paths e.g. <Eigen/Core>
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
# check if MKL is also enabled - can have one or the other, but not both!
# Note: Eigen >= v3.2.5 includes our patches
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5))
message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL")
endif()
# Check for Eigen version which doesn't work with MKL
# See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details.
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4))
message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
endif()
# The actual include directory (for BUILD cmake target interface):
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
else()
# Use bundled Eigen include path.
# Clear any variables set by FindEigen3
if(EIGEN3_INCLUDE_DIR)
set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE)
endif()
# set full path to be used by external projects
# this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")
# The actual include directory (for BUILD cmake target interface):
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
endif()
# Detect Eigen version:
set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h")
if (EXISTS ${EIGEN_VER_H})
file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION)
# Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc...
string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}")
string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}")
string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}")
string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}")
string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}")
string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}")
set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}")
message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}")
else()
message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`")
endif ()
if (MSVC)
if (BUILD_SHARED_LIBS)
# mute eigen static assert to avoid errors in shared lib
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
endif()
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen
endif()
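Because the version string above is scraped out of Macros.h with regexes, a quick sanity check is to compile a one-liner against the same include path and print Eigen's own version macros; everything used below is standard Eigen.

// Cross-check the Eigen version the compiler sees against the one CMake reported.
#include <Eigen/Core>
#include <iostream>

int main() {
  std::cout << "Eigen " << EIGEN_WORLD_VERSION << "." << EIGEN_MAJOR_VERSION << "."
            << EIGEN_MINOR_VERSION << std::endl;
#ifdef EIGEN_USE_MKL_ALL
  std::cout << "Eigen is configured to use MKL" << std::endl;
#endif
  return 0;
}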


@ -0,0 +1,10 @@
# Print warnings at the end
if(GTSAM_WITH_TBB AND NOT TBB_FOUND)
message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
endif()
if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND)
message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
endif()
if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
endif()


@ -0,0 +1,55 @@
###############################################################################
# Set up options
# See whether gtsam_unstable is available (it will be present only if we're using a git checkout)
if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable")
set(GTSAM_UNSTABLE_AVAILABLE 1)
else()
set(GTSAM_UNSTABLE_AVAILABLE 0)
endif()
# Configurable Options
if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enabled, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignored if GTSAM_USE_QUATERNIONS is ON (quaternions always use Rot3::EXPMAP). Otherwise, enable Rot3::EXPMAP as the default retract, or, if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif()
# Enable GTSAM_ROT3_EXPMAP if GTSAM_POSE3_EXPMAP is enabled, and vice versa.
if(GTSAM_POSE3_EXPMAP)
message(STATUS "GTSAM_POSE3_EXPMAP=ON, enabling GTSAM_ROT3_EXPMAP as well")
set(GTSAM_ROT3_EXPMAP 1 CACHE BOOL "" FORCE)
elseif(GTSAM_ROT3_EXPMAP)
message(STATUS "GTSAM_ROT3_EXPMAP=ON, enabling GTSAM_POSE3_EXPMAP as well")
set(GTSAM_POSE3_EXPMAP 1 CACHE BOOL "" FORCE)
endif()
# Options relating to MATLAB wrapper
# TODO: Check for matlab mex binary before handling building of binaries
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.")
# Check / set dependent variables for MATLAB wrapper
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_TYPE_POSTFIXES)
set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX})
endif()
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.")
endif()
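The quaternion and EXPMAP options above ultimately decide the default Rot3 retract (see the ROT3_DEFAULT_COORDINATES_MODE check in testPose3.cpp later in this diff). The preprocessor sketch below shows the kind of selection they typically feed; it is illustrative, not a copy of GTSAM's generated config header.

// Illustrative sketch of how the options could map to the default Rot3 retract mode.
#if defined(GTSAM_USE_QUATERNIONS)
  // The quaternion representation only supports the exponential map.
  #define ROT3_DEFAULT_COORDINATES_MODE Rot3::EXPMAP
#elif defined(GTSAM_ROT3_EXPMAP)
  #define ROT3_DEFAULT_COORDINATES_MODE Rot3::EXPMAP
#else
  #define ROT3_DEFAULT_COORDINATES_MODE Rot3::CAYLEY
#endif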


@ -0,0 +1,52 @@
# JLBC: These should ideally be ported to "modern cmake" via target properties.
#
if (CMAKE_GENERATOR STREQUAL "Ninja" AND
((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR
(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5)))
# Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support.
# Rationale in https://github.com/ninja-build/ninja/issues/814
add_compile_options(-fdiagnostics-color=always)
endif()
# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT()
# or explicit instantiation will generate build errors.
# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
#
if(MSVC AND BUILD_SHARED_LIBS)
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
endif()
if (APPLE AND BUILD_SHARED_LIBS)
# Set the default install directory on macOS
set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib")
endif()
###############################################################################
# Global compile options
if(MSVC)
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS)
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code
endif()
# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc.
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
endif()
endif()
# As of XCode 7, clang also complains about this
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
endif()
endif()
if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
# This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS)
endif()

cmake/HandleMKL.cmake Normal file

@ -0,0 +1,17 @@
###############################################################################
# Find MKL
find_package(MKL)
if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL)
set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h
set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL
list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES})
# --no-as-needed is required with gcc according to the MKL link advisor
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed")
endif()
else()
set(GTSAM_USE_EIGEN_MKL 0)
set(EIGEN_USE_MKL_ALL 0)
endif()

cmake/HandleOpenMP.cmake Normal file

@ -0,0 +1,11 @@
###############################################################################
# Find OpenMP (if we're also using MKL)
find_package(OpenMP) # do this here to generate correct message if disabled
if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL)
if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP)
set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS})
endif()
endif()


@ -0,0 +1,4 @@
###############################################################################
# Find Google perftools
find_package(GooglePerfTools)


@ -0,0 +1,104 @@
###############################################################################
# Print configuration variables
message(STATUS "===============================================================")
message(STATUS "================ Configuration Options ======================")
print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}")
print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}")
print_config("CMake version" "${CMAKE_VERSION}")
print_config("CMake generator" "${CMAKE_GENERATOR}")
print_config("CMake build tool" "${CMAKE_BUILD_TOOL}")
message(STATUS "Build flags ")
print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests")
print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'")
print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
if (DOXYGEN_FOUND)
print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs")
endif()
print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries")
print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name")
if(GTSAM_UNSTABLE_AVAILABLE)
print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ")
print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
print_config("Build type" "${CMAKE_BUILD_TYPE}")
print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
endif()
print_build_options_for_target(gtsam)
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
if(GTSAM_USE_TBB)
print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")
elseif(TBB_FOUND)
print_config("Use Intel TBB" "TBB (Version: ${TBB_VERSION}) found but GTSAM_WITH_TBB is disabled")
else()
print_config("Use Intel TBB" "TBB not found")
endif()
if(GTSAM_USE_EIGEN_MKL)
print_config("Eigen will use MKL" "Yes")
elseif(MKL_FOUND)
print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
else()
print_config("Eigen will use MKL" "MKL not found")
endif()
if(GTSAM_USE_EIGEN_MKL_OPENMP)
print_config("Eigen will use MKL and OpenMP" "Yes")
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found")
elseif(OPENMP_FOUND)
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
else()
print_config("Eigen will use MKL and OpenMP" "OpenMP not found")
endif()
print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}")
if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
print_config("Cheirality exceptions enabled" "YES")
else()
print_config("Cheirality exceptions enabled" "NO")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
print_config("Build with ccache" "Yes")
elseif(CCACHE_FOUND)
print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
else()
print_config("Build with ccache" "No")
endif()
endif()
message(STATUS "Packaging flags")
print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}")
print_config("CPack Generator" "${CPACK_GENERATOR}")
message(STATUS "GTSAM flags ")
print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
message(STATUS "MATLAB toolbox flags")
print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
print_config("MATLAB root" "${MATLAB_ROOT}")
print_config("MEX binary" "${MEX_COMMAND}")
endif()
message(STATUS "Python toolbox flags ")
print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ")
if(GTSAM_BUILD_PYTHON)
print_config("Python version" ${GTSAM_PYTHON_VERSION})
endif()
message(STATUS "===============================================================")

cmake/HandlePython.cmake Normal file

@ -0,0 +1,29 @@
# Set Python version if either Python or MATLAB wrapper is requested.
if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
if(${GTSAM_PYTHON_VERSION} STREQUAL "Default")
# Get info about the Python3 interpreter
# https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3
find_package(Python3 COMPONENTS Interpreter Development)
if(NOT ${Python3_FOUND})
message(FATAL_ERROR "Cannot find Python3 interpreter. Please install Python >= 3.6.")
endif()
set(GTSAM_PYTHON_VERSION "${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}"
CACHE
STRING
"The version of Python to build the wrappers against."
FORCE)
endif()
endif()
if(GTSAM_BUILD_PYTHON)
if(GTSAM_UNSTABLE_BUILD_PYTHON)
if (NOT GTSAM_BUILD_UNSTABLE)
message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.")
set(GTSAM_UNSTABLE_BUILD_PYTHON OFF)
endif()
endif()
set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python")
endif()

cmake/HandleTBB.cmake Normal file

@ -0,0 +1,24 @@
###############################################################################
# Find TBB
find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
# Set up variables if we're using TBB
if(TBB_FOUND AND GTSAM_WITH_TBB)
set(GTSAM_USE_TBB 1) # This will go into config.h
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1)
else()
set(TBB_GREATER_EQUAL_2020 0)
endif()
# all definitions and link requisites will go via imported targets:
# tbb & tbbmalloc
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
else()
set(GTSAM_USE_TBB 0) # This will go into config.h
endif()
###############################################################################
# Prohibit Timing build mode in combination with TBB
if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
endif()
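GTSAM_USE_TBB goes into config.h, so downstream code can keep a serial fallback next to the TBB path. A minimal sketch of that pattern (not GTSAM's internal code) is shown below.

// Illustrative sketch: gating a parallel loop on the GTSAM_USE_TBB definition.
#include <cstddef>
#include <vector>

#ifdef GTSAM_USE_TBB
  #include <tbb/blocked_range.h>
  #include <tbb/parallel_for.h>
#endif

void square_all(std::vector<double>& v) {
#ifdef GTSAM_USE_TBB
  tbb::parallel_for(tbb::blocked_range<std::size_t>(0, v.size()),
                    [&v](const tbb::blocked_range<std::size_t>& r) {
                      for (std::size_t i = r.begin(); i != r.end(); ++i) v[i] *= v[i];
                    });
#else
  for (std::size_t i = 0; i < v.size(); ++i) v[i] *= v[i];
#endif
}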


@ -0,0 +1,10 @@
# ----------------------------------------------------------------------------
# Uninstall target, for "make uninstall"
# ----------------------------------------------------------------------------
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
IMMEDIATE @ONLY)
add_custom_target(uninstall
"${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake")


@ -1188,7 +1188,7 @@ USE_MATHJAX = YES
# MathJax, but it is strongly recommended to install a local copy of MathJax # MathJax, but it is strongly recommended to install a local copy of MathJax
# before deployment. # before deployment.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering. # names that should be enabled during MathJax rendering.


@ -7,9 +7,9 @@ FROM dellaert/ubuntu-gtsam:bionic
RUN apt-get install -y python3-pip python3-dev RUN apt-get install -y python3-pip python3-dev
# Install python wrapper requirements # Install python wrapper requirements
RUN python3 -m pip install -U -r /usr/src/gtsam/cython/requirements.txt RUN python3 -m pip install -U -r /usr/src/gtsam/python/requirements.txt
# Run cmake again, now with cython toolbox on # Run cmake again, now with python toolbox on
WORKDIR /usr/src/gtsam/build WORKDIR /usr/src/gtsam/build
RUN cmake \ RUN cmake \
-DCMAKE_BUILD_TYPE=Release \ -DCMAKE_BUILD_TYPE=Release \
@ -17,7 +17,7 @@ RUN cmake \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \ -DGTSAM_BUILD_TIMING_ALWAYS=OFF \
-DGTSAM_BUILD_TESTS=OFF \ -DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \ -DGTSAM_BUILD_PYTHON=ON \
-DGTSAM_PYTHON_VERSION=3\ -DGTSAM_PYTHON_VERSION=3\
.. ..
@ -25,7 +25,7 @@ RUN cmake \
RUN make -j4 install && make clean RUN make -j4 install && make clean
# Needed to run python wrapper: # Needed to run python wrapper:
RUN echo 'export PYTHONPATH=/usr/local/cython/:$PYTHONPATH' >> /root/.bashrc RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc
# Run bash # Run bash
CMD ["bash"] CMD ["bash"]


@ -23,7 +23,6 @@ RUN cmake \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \ -DGTSAM_BUILD_TIMING_ALWAYS=OFF \
-DGTSAM_BUILD_TESTS=OFF \ -DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=OFF \
.. ..
# Build # Build


@ -23,6 +23,7 @@
#include <boost/optional.hpp> #include <boost/optional.hpp>
#include <map> #include <map>
#include <iostream> #include <iostream>
#include <sstream>
#include <vector> #include <vector>
namespace gtsam { namespace gtsam {
@ -349,4 +350,44 @@ bool assert_inequal(const V& expected, const V& actual, double tol = 1e-9) {
return false; return false;
} }
/**
* Capture standard output via the std::cout stream and compare it against an expected string.
*/
template<class V>
bool assert_stdout_equal(const std::string& expected, const V& actual) {
// Redirect output to buffer so we can compare
std::stringstream buffer;
// Save the original output stream so we can reset later
std::streambuf* old = std::cout.rdbuf(buffer.rdbuf());
// We test against actual std::cout for faithful reproduction
std::cout << actual;
// Get output string and reset stdout
std::string actual_ = buffer.str();
std::cout.rdbuf(old);
return assert_equal(expected, actual_);
}
/**
* Capture the output of the object's print() function and compare it against an expected string.
*/
template<class V>
bool assert_print_equal(const std::string& expected, const V& actual) {
// Redirect output to buffer so we can compare
std::stringstream buffer;
// Save the original output stream so we can reset later
std::streambuf* old = std::cout.rdbuf(buffer.rdbuf());
// We test against actual std::cout for faithful reproduction
actual.print();
// Get output string and reset stdout
std::string actual_ = buffer.str();
std::cout.rdbuf(old);
return assert_equal(expected, actual_);
}
} // \namespace gtsam } // \namespace gtsam
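Both helpers use the same pattern: swap out std::cout's buffer, produce the output, restore the buffer, and compare. A short usage sketch is below; the expected strings simply mirror the updated Cal3_S2 and Pose3 tests later in this diff.

// Usage sketch for assert_stdout_equal and assert_print_equal.
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Pose3.h>

bool checkOutputs() {
  gtsam::Cal3_S2 cal(5, 5, 5, 5, 5);
  gtsam::Pose3 pose(gtsam::Rot3::identity(), gtsam::Point3(1, 2, 3));

  // Captures `std::cout << cal` and compares it against the expected string.
  const bool coutOk = gtsam::assert_stdout_equal("{fx: 5, fy: 5, s:5, px:5, py:5}", cal);

  // Captures `pose.print()` and compares it against the expected string.
  const bool printOk = gtsam::assert_print_equal(
      "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 1 2 3\n", pose);

  return coutOk && printOk;
}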


@ -98,6 +98,17 @@ public:
return k2_; return k2_;
} }
/// image center in x
inline double px() const {
return u0_;
}
/// image center in y
inline double py() const {
return v0_;
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
/// get parameter u0 /// get parameter u0
inline double u0() const { inline double u0() const {
return u0_; return u0_;
@ -107,6 +118,7 @@ public:
inline double v0() const { inline double v0() const {
return v0_; return v0_;
} }
#endif
/** /**


@ -262,9 +262,29 @@ namespace gtsam {
static Rot3 AlignTwoPairs(const Unit3& a_p, const Unit3& b_p, // static Rot3 AlignTwoPairs(const Unit3& a_p, const Unit3& b_p, //
const Unit3& a_q, const Unit3& b_q); const Unit3& a_q, const Unit3& b_q);
/// Static, named constructor that finds Rot3 element closest to M in Frobenius norm. /**
* Static, named constructor that finds Rot3 element closest to M in Frobenius norm.
*
* Uses Full SVD to compute the orthogonal matrix, thus is highly accurate and robust.
*
* N. J. Higham. Matrix nearness problems and applications.
* In M. J. C. Gover and S. Barnett, editors, Applications of Matrix Theory, pages 1-27.
* Oxford University Press, 1989.
*/
static Rot3 ClosestTo(const Matrix3& M) { return Rot3(SO3::ClosestTo(M)); } static Rot3 ClosestTo(const Matrix3& M) { return Rot3(SO3::ClosestTo(M)); }
/**
* Normalize rotation so that its determinant is 1.
* This means either re-orthogonalizing the Matrix representation or
* normalizing the quaternion representation.
*
* This method is akin to `ClosestTo` but uses a computationally cheaper
* algorithm.
*
* Ref: https://drive.google.com/file/d/0B9rLLz1XQKmaZTlQdV81QjNoZTA/view
*/
Rot3 normalized() const;
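In practice the two differ in cost and scope: ClosestTo projects an arbitrary matrix onto SO(3) via a full SVD, while normalized() cheaply repairs small drift in an already-nearly-orthogonal rotation. A short sketch follows; how much drift is actually visible depends on the Rot3 representation chosen at configure time.

// Sketch: repairing numerical drift accumulated by composing many small rotations.
#include <gtsam/geometry/Rot3.h>
#include <iostream>

int main() {
  gtsam::Rot3 R;  // identity
  for (int i = 0; i < 10000; ++i) R = R * gtsam::Rot3::Ry(1e-3);

  const gtsam::Rot3 repaired = R.normalized();                       // cheap fix-up
  const gtsam::Rot3 projected = gtsam::Rot3::ClosestTo(R.matrix());  // full SVD projection

  std::cout << "det before: " << R.matrix().determinant()
            << ", after normalized(): " << repaired.matrix().determinant()
            << ", after ClosestTo(): " << projected.matrix().determinant() << std::endl;
  return 0;
}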
/// @} /// @}
/// @name Testable /// @name Testable
/// @{ /// @{
@ -506,7 +526,7 @@ namespace gtsam {
/** /**
* @brief Spherical Linear intERPolation between *this and other * @brief Spherical Linear intERPolation between *this and other
* @param s a value between 0 and 1 * @param t a value between 0 and 1
* @param other final point of the interpolation geodesic on the manifold * @param other final point of the interpolation geodesic on the manifold
*/ */
Rot3 slerp(double t, const Rot3& other) const; Rot3 slerp(double t, const Rot3& other) const;


@ -108,6 +108,33 @@ Rot3 Rot3::RzRyRx(double x, double y, double z, OptionalJacobian<3, 1> Hx,
); );
} }
/* ************************************************************************* */
Rot3 Rot3::normalized() const {
/// Implementation from here: https://stackoverflow.com/a/23082112/1236990
/// Essentially, this computes the orthogonalization error, distributes the
/// error to the x and y rows, and then performs a Taylor expansion to
/// orthogonalize.
Matrix3 rot = rot_.matrix(), rot_orth;
// Check if determinant is already 1.
// If yes, then return the current Rot3.
if (std::fabs(rot.determinant()-1) < 1e-12) return Rot3(rot_);
Vector3 x = rot.block<1, 3>(0, 0), y = rot.block<1, 3>(1, 0);
double error = x.dot(y);
Vector3 x_ort = x - (error / 2) * y, y_ort = y - (error / 2) * x;
Vector3 z_ort = x_ort.cross(y_ort);
rot_orth.block<1, 3>(0, 0) = 0.5 * (3 - x_ort.dot(x_ort)) * x_ort;
rot_orth.block<1, 3>(1, 0) = 0.5 * (3 - y_ort.dot(y_ort)) * y_ort;
rot_orth.block<1, 3>(2, 0) = 0.5 * (3 - z_ort.dot(z_ort)) * z_ort;
return Rot3(rot_orth);
}
/* ************************************************************************* */ /* ************************************************************************* */
Rot3 Rot3::operator*(const Rot3& R2) const { Rot3 Rot3::operator*(const Rot3& R2) const {
return Rot3(rot_*R2.rot_); return Rot3(rot_*R2.rot_);
@ -149,7 +176,17 @@ Vector3 Rot3::CayleyChart::Local(const Rot3& R, OptionalJacobian<3,3> H) {
if (H) throw std::runtime_error("Rot3::CayleyChart::Local Derivative"); if (H) throw std::runtime_error("Rot3::CayleyChart::Local Derivative");
// Create a fixed-size matrix // Create a fixed-size matrix
Matrix3 A = R.matrix(); Matrix3 A = R.matrix();
// Mathematica closed form optimization (procrastination?) gone wild:
// Check if (A+I) is invertible. Same as checking for -1 eigenvalue.
if ((A + I_3x3).determinant() == 0.0) {
throw std::runtime_error("Rot3::CayleyChart::Local Invalid Rotation");
}
// Mathematica closed form optimization.
// The code below carries out the following steps:
// 1. Compute the inverse of P = (A+I), using a closed-form formula since P is 3x3
// 2. Compute the Cayley transform C = 2 * P^{-1} * (A-I)
// 3. C is skew-symmetric, so we pick out the computations corresponding only to x, y, and z.
const double a = A(0, 0), b = A(0, 1), c = A(0, 2); const double a = A(0, 0), b = A(0, 1), c = A(0, 2);
const double d = A(1, 0), e = A(1, 1), f = A(1, 2); const double d = A(1, 0), e = A(1, 1), f = A(1, 2);
const double g = A(2, 0), h = A(2, 1), i = A(2, 2); const double g = A(2, 0), h = A(2, 1), i = A(2, 2);


@ -86,6 +86,10 @@ namespace gtsam {
gtsam::Quaternion(Eigen::AngleAxisd(x, Eigen::Vector3d::UnitX()))); gtsam::Quaternion(Eigen::AngleAxisd(x, Eigen::Vector3d::UnitX())));
} }
/* ************************************************************************* */
Rot3 Rot3::normalized() const {
return Rot3(quaternion_.normalized());
}
/* ************************************************************************* */ /* ************************************************************************* */
Rot3 Rot3::operator*(const Rot3& R2) const { Rot3 Rot3::operator*(const Rot3& R2) const {
return Rot3(quaternion_ * R2.quaternion_); return Rot3(quaternion_ * R2.quaternion_);


@ -16,6 +16,7 @@
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/numericalDerivative.h> #include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/Cal3_S2.h> #include <gtsam/geometry/Cal3_S2.h>
@ -127,6 +128,16 @@ TEST(Cal3_S2, between) {
} }
/* ************************************************************************* */
TEST(Cal3_S2, Print) {
Cal3_S2 cal(5, 5, 5, 5, 5);
std::stringstream os;
os << "{fx: " << cal.fx() << ", fy: " << cal.fy() << ", s:" << cal.skew() << ", px:" << cal.px()
<< ", py:" << cal.py() << "}";
EXPECT(assert_stdout_equal(os.str(), cal));
}
/* ************************************************************************* */ /* ************************************************************************* */
int main() { int main() {
TestResult tr; TestResult tr;


@ -18,6 +18,7 @@
#include <gtsam/geometry/Pose2.h> #include <gtsam/geometry/Pose2.h>
#include <gtsam/base/testLie.h> #include <gtsam/base/testLie.h>
#include <gtsam/base/lieProxies.h> #include <gtsam/base/lieProxies.h>
#include <gtsam/base/TestableAssertions.h>
#include <boost/assign/std/vector.hpp> // for operator += #include <boost/assign/std/vector.hpp> // for operator +=
using namespace boost::assign; using namespace boost::assign;
@ -906,9 +907,9 @@ TEST(Pose3 , ChartDerivatives) {
Pose3 id; Pose3 id;
if (ROT3_DEFAULT_COORDINATES_MODE == Rot3::EXPMAP) { if (ROT3_DEFAULT_COORDINATES_MODE == Rot3::EXPMAP) {
CHECK_CHART_DERIVATIVES(id,id); CHECK_CHART_DERIVATIVES(id,id);
// CHECK_CHART_DERIVATIVES(id,T2); CHECK_CHART_DERIVATIVES(id,T2);
// CHECK_CHART_DERIVATIVES(T2,id); CHECK_CHART_DERIVATIVES(T2,id);
// CHECK_CHART_DERIVATIVES(T2,T3); CHECK_CHART_DERIVATIVES(T2,T3);
} }
} }
@ -1028,32 +1029,13 @@ TEST(Pose3, Create) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(Pose3, print) { TEST(Pose3, Print) {
std::stringstream redirectStream;
std::streambuf* ssbuf = redirectStream.rdbuf();
std::streambuf* oldbuf = std::cout.rdbuf();
// redirect cout to redirectStream
std::cout.rdbuf(ssbuf);
Pose3 pose(Rot3::identity(), Point3(1, 2, 3)); Pose3 pose(Rot3::identity(), Point3(1, 2, 3));
// output is captured to redirectStream
pose.print();
// Generate the expected output // Generate the expected output
std::stringstream expected; std::string expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 1 2 3\n";
Point3 translation(1, 2, 3);
// Add expected rotation EXPECT(assert_print_equal(expected, pose));
expected << "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\n";
expected << "t: 1 2 3\n";
// reset cout to the original stream
std::cout.rdbuf(oldbuf);
// Get substring corresponding to translation part
std::string actual = redirectStream.str();
CHECK_EQUAL(expected.str(), actual);
} }
/* ************************************************************************* */ /* ************************************************************************* */


@ -910,6 +910,26 @@ TEST(Rot3, yaw_derivative) {
CHECK(assert_equal(num, calc)); CHECK(assert_equal(num, calc));
} }
/* ************************************************************************* */
TEST(Rot3, determinant) {
size_t degree = 1;
Rot3 R_w0; // Zero rotation
Rot3 R_w1 = Rot3::Ry(degree * M_PI / 180);
Rot3 R_01, R_w2;
double actual, expected = 1.0;
for (size_t i = 2; i < 360; ++i) {
R_01 = R_w0.between(R_w1);
R_w2 = R_w1 * R_01;
R_w0 = R_w1;
R_w1 = R_w2.normalized();
actual = R_w2.matrix().determinant();
EXPECT_DOUBLES_EQUAL(expected, actual, 1e-7);
}
}
/* ************************************************************************* */ /* ************************************************************************* */
int main() { int main() {
TestResult tr; TestResult tr;


@ -57,7 +57,7 @@ static StereoCamera cam2(pose3, cal4ptr);
static StereoPoint2 spt(1.0, 2.0, 3.0); static StereoPoint2 spt(1.0, 2.0, 3.0);
/* ************************************************************************* */ /* ************************************************************************* */
TEST_DISABLED (Serialization, text_geometry) { TEST (Serialization, text_geometry) {
EXPECT(equalsObj<gtsam::Point2>(Point2(1.0, 2.0))); EXPECT(equalsObj<gtsam::Point2>(Point2(1.0, 2.0)));
EXPECT(equalsObj<gtsam::Pose2>(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsObj<gtsam::Pose2>(Pose2(1.0, 2.0, 0.3)));
EXPECT(equalsObj<gtsam::Rot2>(Rot2::fromDegrees(30.0))); EXPECT(equalsObj<gtsam::Rot2>(Rot2::fromDegrees(30.0)));
@ -82,7 +82,7 @@ TEST_DISABLED (Serialization, text_geometry) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_DISABLED (Serialization, xml_geometry) { TEST (Serialization, xml_geometry) {
EXPECT(equalsXML<gtsam::Point2>(Point2(1.0, 2.0))); EXPECT(equalsXML<gtsam::Point2>(Point2(1.0, 2.0)));
EXPECT(equalsXML<gtsam::Pose2>(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsXML<gtsam::Pose2>(Pose2(1.0, 2.0, 0.3)));
EXPECT(equalsXML<gtsam::Rot2>(Rot2::fromDegrees(30.0))); EXPECT(equalsXML<gtsam::Rot2>(Rot2::fromDegrees(30.0)));
@ -106,7 +106,7 @@ TEST_DISABLED (Serialization, xml_geometry) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_DISABLED (Serialization, binary_geometry) { TEST (Serialization, binary_geometry) {
EXPECT(equalsBinary<gtsam::Point2>(Point2(1.0, 2.0))); EXPECT(equalsBinary<gtsam::Point2>(Point2(1.0, 2.0)));
EXPECT(equalsBinary<gtsam::Pose2>(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsBinary<gtsam::Pose2>(Pose2(1.0, 2.0, 0.3)));
EXPECT(equalsBinary<gtsam::Rot2>(Rot2::fromDegrees(30.0))); EXPECT(equalsBinary<gtsam::Rot2>(Rot2::fromDegrees(30.0)));


@ -93,9 +93,9 @@
* - Add "void serializable()" to a class if you only want the class to be serialized as a * - Add "void serializable()" to a class if you only want the class to be serialized as a
* part of a container (such as noisemodel). This version does not require a publicly * part of a container (such as noisemodel). This version does not require a publicly
* accessible default constructor. * accessible default constructor.
* Forward declarations and class definitions for Cython: * Forward declarations and class definitions for Pybind:
* - Need to specify the base class (both this forward class and base class are declared in an external cython header) * - Need to specify the base class (both this forward class and base class are declared in an external Pybind header)
* This is so Cython can generate proper inheritance. * This is so Pybind can generate proper inheritance.
* Example when wrapping a gtsam-based project: * Example when wrapping a gtsam-based project:
* // forward declarations * // forward declarations
* virtual class gtsam::NonlinearFactor * virtual class gtsam::NonlinearFactor
@ -104,7 +104,7 @@
* #include <MyFactor.h> * #include <MyFactor.h>
* virtual class MyFactor : gtsam::NoiseModelFactor {...}; * virtual class MyFactor : gtsam::NoiseModelFactor {...};
* - *DO NOT* re-define an overridden function already declared in the external (forward-declared) base class * - *DO NOT* re-define an overridden function already declared in the external (forward-declared) base class
* - This will cause an ambiguity problem in Cython pxd header file * - This will cause an ambiguity problem in the Pybind header file
*/ */
/** /**
@ -597,6 +597,7 @@ class Rot3 {
Rot3(double R11, double R12, double R13, Rot3(double R11, double R12, double R13,
double R21, double R22, double R23, double R21, double R22, double R23,
double R31, double R32, double R33); double R31, double R32, double R33);
Rot3(double w, double x, double y, double z);
static gtsam::Rot3 Rx(double t); static gtsam::Rot3 Rx(double t);
static gtsam::Rot3 Ry(double t); static gtsam::Rot3 Ry(double t);
@ -980,8 +981,8 @@ class Cal3Bundler {
double fy() const; double fy() const;
double k1() const; double k1() const;
double k2() const; double k2() const;
double u0() const; double px() const;
double v0() const; double py() const;
Vector vector() const; Vector vector() const;
Vector k() const; Vector k() const;
Matrix K() const; Matrix K() const;
@ -2068,7 +2069,7 @@ class NonlinearFactorGraph {
gtsam::KeySet keys() const; gtsam::KeySet keys() const;
gtsam::KeyVector keyVector() const; gtsam::KeyVector keyVector() const;
template<T = {Vector, gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2,gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, gtsam::imuBias::ConstantBias}> template<T = {Vector, gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2,gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, gtsam::PinholeCamera<gtsam::Cal3Bundler>, gtsam::imuBias::ConstantBias}>
void addPrior(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel); void addPrior(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel);
// NonlinearFactorGraph // NonlinearFactorGraph
@ -2157,12 +2158,13 @@ class Values {
void insert(size_t j, const gtsam::SOn& P); void insert(size_t j, const gtsam::SOn& P);
void insert(size_t j, const gtsam::Rot3& rot3); void insert(size_t j, const gtsam::Rot3& rot3);
void insert(size_t j, const gtsam::Pose3& pose3); void insert(size_t j, const gtsam::Pose3& pose3);
void insert(size_t j, const gtsam::Unit3& unit3);
void insert(size_t j, const gtsam::Cal3_S2& cal3_s2); void insert(size_t j, const gtsam::Cal3_S2& cal3_s2);
void insert(size_t j, const gtsam::Cal3DS2& cal3ds2); void insert(size_t j, const gtsam::Cal3DS2& cal3ds2);
void insert(size_t j, const gtsam::Cal3Bundler& cal3bundler); void insert(size_t j, const gtsam::Cal3Bundler& cal3bundler);
void insert(size_t j, const gtsam::EssentialMatrix& essential_matrix); void insert(size_t j, const gtsam::EssentialMatrix& essential_matrix);
void insert(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera); void insert(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera);
// void insert(size_t j, const gtsam::PinholeCameraCal3Bundler& camera); void insert(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Bundler>& camera);
void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
void insert(size_t j, const gtsam::NavState& nav_state); void insert(size_t j, const gtsam::NavState& nav_state);
@ -2175,18 +2177,19 @@ class Values {
void update(size_t j, const gtsam::SOn& P); void update(size_t j, const gtsam::SOn& P);
void update(size_t j, const gtsam::Rot3& rot3); void update(size_t j, const gtsam::Rot3& rot3);
void update(size_t j, const gtsam::Pose3& pose3); void update(size_t j, const gtsam::Pose3& pose3);
void update(size_t j, const gtsam::Unit3& unit3);
void update(size_t j, const gtsam::Cal3_S2& cal3_s2); void update(size_t j, const gtsam::Cal3_S2& cal3_s2);
void update(size_t j, const gtsam::Cal3DS2& cal3ds2); void update(size_t j, const gtsam::Cal3DS2& cal3ds2);
void update(size_t j, const gtsam::Cal3Bundler& cal3bundler); void update(size_t j, const gtsam::Cal3Bundler& cal3bundler);
void update(size_t j, const gtsam::EssentialMatrix& essential_matrix); void update(size_t j, const gtsam::EssentialMatrix& essential_matrix);
void update(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera); void update(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera);
// void update(size_t j, const gtsam::PinholeCameraCal3Bundler& camera); void update(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Bundler>& camera);
void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
void update(size_t j, const gtsam::NavState& nav_state); void update(size_t j, const gtsam::NavState& nav_state);
void update(size_t j, Vector vector); void update(size_t j, Vector vector);
void update(size_t j, Matrix matrix); void update(size_t j, Matrix matrix);
template<T = {gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Pose2, gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose3, gtsam::Cal3_S2, gtsam::Cal3DS2, gtsam::Cal3Bundler, gtsam::EssentialMatrix, gtsam::PinholeCameraCal3_S2, gtsam::imuBias::ConstantBias, gtsam::NavState, Vector, Matrix}> template<T = {gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Pose2, gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose3, gtsam::Unit3, gtsam::Cal3_S2, gtsam::Cal3DS2, gtsam::Cal3Bundler, gtsam::EssentialMatrix, gtsam::PinholeCameraCal3_S2, gtsam::PinholeCamera<gtsam::Cal3Bundler>, gtsam::imuBias::ConstantBias, gtsam::NavState, Vector, Matrix}>
T at(size_t j); T at(size_t j);
/// version for double /// version for double
@ -2490,7 +2493,8 @@ class ISAM2 {
template <VALUE = {gtsam::Point2, gtsam::Rot2, gtsam::Pose2, gtsam::Point3, template <VALUE = {gtsam::Point2, gtsam::Rot2, gtsam::Pose2, gtsam::Point3,
gtsam::Rot3, gtsam::Pose3, gtsam::Cal3_S2, gtsam::Cal3DS2, gtsam::Rot3, gtsam::Pose3, gtsam::Cal3_S2, gtsam::Cal3DS2,
gtsam::Cal3Bundler, gtsam::EssentialMatrix, gtsam::Cal3Bundler, gtsam::EssentialMatrix,
gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, Vector, Matrix}> gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, gtsam::PinholeCamera<gtsam::Cal3Bundler>,
Vector, Matrix}>
VALUE calculateEstimate(size_t key) const; VALUE calculateEstimate(size_t key) const;
gtsam::Values calculateBestEstimate() const; gtsam::Values calculateBestEstimate() const;
Matrix marginalCovariance(size_t key) const; Matrix marginalCovariance(size_t key) const;
@ -2528,7 +2532,7 @@ class NonlinearISAM {
#include <gtsam/geometry/StereoPoint2.h> #include <gtsam/geometry/StereoPoint2.h>
#include <gtsam/nonlinear/PriorFactor.h> #include <gtsam/nonlinear/PriorFactor.h>
template<T = {Vector, gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2,gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, gtsam::imuBias::ConstantBias}> template<T = {Vector, gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Unit3, gtsam::Cal3_S2,gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, gtsam::imuBias::ConstantBias, gtsam::PinholeCamera<gtsam::Cal3Bundler>}>
virtual class PriorFactor : gtsam::NoiseModelFactor { virtual class PriorFactor : gtsam::NoiseModelFactor {
PriorFactor(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel); PriorFactor(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel);
T prior() const; T prior() const;
@ -2673,6 +2677,7 @@ virtual class GeneralSFMFactor : gtsam::NoiseModelFactor {
typedef gtsam::GeneralSFMFactor<gtsam::PinholeCameraCal3_S2, gtsam::Point3> GeneralSFMFactorCal3_S2; typedef gtsam::GeneralSFMFactor<gtsam::PinholeCameraCal3_S2, gtsam::Point3> GeneralSFMFactorCal3_S2;
//TODO (Issue 237) due to lack of jacobians of Cal3DS2_Base::calibrate, GeneralSFMFactor does not apply to Cal3DS2 //TODO (Issue 237) due to lack of jacobians of Cal3DS2_Base::calibrate, GeneralSFMFactor does not apply to Cal3DS2
//typedef gtsam::GeneralSFMFactor<gtsam::PinholeCameraCal3DS2, gtsam::Point3> GeneralSFMFactorCal3DS2; //typedef gtsam::GeneralSFMFactor<gtsam::PinholeCameraCal3DS2, gtsam::Point3> GeneralSFMFactorCal3DS2;
typedef gtsam::GeneralSFMFactor<gtsam::PinholeCamera<gtsam::Cal3Bundler>, gtsam::Point3> GeneralSFMFactorCal3Bundler;
template<CALIBRATION = {gtsam::Cal3_S2}> template<CALIBRATION = {gtsam::Cal3_S2}>
virtual class GeneralSFMFactor2 : gtsam::NoiseModelFactor { virtual class GeneralSFMFactor2 : gtsam::NoiseModelFactor {
@ -2759,21 +2764,36 @@ virtual class EssentialMatrixFactor : gtsam::NoiseModelFactor {
}; };
#include <gtsam/slam/dataset.h> #include <gtsam/slam/dataset.h>
class SfmTrack { class SfmTrack {
Point3 point3() const; SfmTrack();
SfmTrack(const gtsam::Point3& pt);
const Point3& point3() const;
double r;
double g;
double b;
// TODO Need to close wrap#10 to allow this to work.
// std::vector<pair<size_t, gtsam::Point2>> measurements;
size_t number_measurements() const; size_t number_measurements() const;
pair<size_t, gtsam::Point2> measurement(size_t idx) const; pair<size_t, gtsam::Point2> measurement(size_t idx) const;
pair<size_t, size_t> siftIndex(size_t idx) const; pair<size_t, size_t> siftIndex(size_t idx) const;
void add_measurement(size_t idx, const gtsam::Point2& m);
}; };
class SfmData { class SfmData {
SfmData();
size_t number_cameras() const; size_t number_cameras() const;
size_t number_tracks() const; size_t number_tracks() const;
gtsam::PinholeCamera<gtsam::Cal3Bundler> camera(size_t idx) const; gtsam::PinholeCamera<gtsam::Cal3Bundler> camera(size_t idx) const;
gtsam::SfmTrack track(size_t idx) const; gtsam::SfmTrack track(size_t idx) const;
void add_track(const gtsam::SfmTrack& t) ;
void add_camera(const gtsam::SfmCamera& cam);
}; };
gtsam::SfmData readBal(string filename); gtsam::SfmData readBal(string filename);
bool writeBAL(string filename, gtsam::SfmData& data);
gtsam::Values initialCamerasEstimate(const gtsam::SfmData& db); gtsam::Values initialCamerasEstimate(const gtsam::SfmData& db);
gtsam::Values initialCamerasAndPointsEstimate(const gtsam::SfmData& db); gtsam::Values initialCamerasAndPointsEstimate(const gtsam::SfmData& db);
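Assuming the C++ API in gtsam/slam/dataset.h matches the wrapper declarations above, assembling a tiny dataset and round-tripping it through the BAL format looks roughly like this:

// Sketch: building an SfmData instance and writing it in BAL format.
// Assumes the C++ API mirrors the wrapper declarations above.
#include <gtsam/geometry/Cal3Bundler.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/slam/dataset.h>

int main() {
  gtsam::SfmData data;

  // One camera at the origin with a default Cal3Bundler calibration.
  gtsam::PinholeCamera<gtsam::Cal3Bundler> camera(gtsam::Pose3(), gtsam::Cal3Bundler());
  data.add_camera(camera);

  // One track, observed once by camera 0.
  gtsam::SfmTrack track(gtsam::Point3(0, 0, 5));
  track.add_measurement(0, gtsam::Point2(320, 240));
  data.add_track(track);

  // Round-trip through the BAL text format.
  gtsam::writeBAL("/tmp/tiny.bal", data);
  gtsam::SfmData reread = gtsam::readBal("/tmp/tiny.bal");
  return reread.number_tracks() == 1 ? 0 : 1;
}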
@ -2885,6 +2905,7 @@ class BinaryMeasurement {
size_t key1() const; size_t key1() const;
size_t key2() const; size_t key2() const;
T measured() const; T measured() const;
gtsam::noiseModel::Base* noiseModel() const;
}; };
typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3;
@ -3018,6 +3039,26 @@ class ShonanAveraging3 {
pair<gtsam::Values, double> run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; pair<gtsam::Values, double> run(const gtsam::Values& initial, size_t min_p, size_t max_p) const;
}; };
#include <gtsam/sfm/MFAS.h>
class KeyPairDoubleMap {
KeyPairDoubleMap();
KeyPairDoubleMap(const gtsam::KeyPairDoubleMap& other);
size_t size() const;
bool empty() const;
void clear();
size_t at(const pair<size_t, size_t>& keypair) const;
};
class MFAS {
MFAS(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const gtsam::Unit3& projectionDirection);
gtsam::KeyPairDoubleMap computeOutlierWeights() const;
gtsam::KeyVector computeOrdering() const;
};
#include <gtsam/sfm/TranslationRecovery.h> #include <gtsam/sfm/TranslationRecovery.h>
class TranslationRecovery { class TranslationRecovery {
TranslationRecovery(const gtsam::BinaryMeasurementsUnit3 &relativeTranslations, TranslationRecovery(const gtsam::BinaryMeasurementsUnit3 &relativeTranslations,


@ -164,8 +164,16 @@ inline Key Y(std::uint64_t j) { return Symbol('y', j); }
inline Key Z(std::uint64_t j) { return Symbol('z', j); } inline Key Z(std::uint64_t j) { return Symbol('z', j); }
} }
/** Generates symbol shorthands with a user-chosen character, as an
* alternative to the predefined one-letter shorthands. */
class SymbolGenerator {
const char c_;
public:
SymbolGenerator(const char c) : c_(c) {}
Symbol operator()(const std::uint64_t j) const { return Symbol(c_, j); }
};
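For example, a project that wants its own mnemonic for trajectory keys can create a generator once and use it like the predefined shorthands; the Values usage below is ordinary GTSAM and only shows where the generated keys end up.

// Sketch: a custom one-letter key family built with SymbolGenerator.
#include <cstddef>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/Values.h>

void fillTrajectory(gtsam::Values& values, size_t numPoses) {
  const gtsam::SymbolGenerator T('t');  // 't' keys for trajectory poses
  for (size_t i = 0; i < numPoses; ++i) {
    values.insert(T(i), gtsam::Pose2(static_cast<double>(i), 0.0, 0.0));
  }
}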
/// traits /// traits
template<> struct traits<Symbol> : public Testable<Symbol> {}; template<> struct traits<Symbol> : public Testable<Symbol> {};
} // \ namespace gtsam } // \ namespace gtsam


@ -40,6 +40,25 @@ TEST(Key, KeySymbolConversion) {
EXPECT(assert_equal(original, actual)) EXPECT(assert_equal(original, actual))
} }
/* ************************************************************************* */
TEST(Key, SymbolGenerator) {
const auto x1 = gtsam::symbol_shorthand::X(1);
const auto v1 = gtsam::symbol_shorthand::V(1);
const auto a1 = gtsam::symbol_shorthand::A(1);
const auto Z = gtsam::SymbolGenerator('x');
const auto DZ = gtsam::SymbolGenerator('v');
const auto DDZ = gtsam::SymbolGenerator('a');
const auto z1 = Z(1);
const auto dz1 = DZ(1);
const auto ddz1 = DDZ(1);
EXPECT(assert_equal(x1, z1));
EXPECT(assert_equal(v1, dz1));
EXPECT(assert_equal(a1, ddz1));
}
/* ************************************************************************* */ /* ************************************************************************* */
template<int KeySize> template<int KeySize>
Key KeyTestValue(); Key KeyTestValue();
@ -106,4 +125,3 @@ int main() {
return TestRegistry::runAllTests(tr); return TestRegistry::runAllTests(tr);
} }
/* ************************************************************************* */ /* ************************************************************************* */


@ -42,7 +42,7 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation
public: public:
/// Default constructor, only for serialization and Cython wrapper /// Default constructor, only for serialization and wrappers
PreintegratedAhrsMeasurements() {} PreintegratedAhrsMeasurements() {}
/** /**


@ -145,7 +145,7 @@ public:
/// @name Constructors /// @name Constructors
/// @{ /// @{
/// Default constructor only for serialization and Cython wrapper /// Default constructor only for serialization and wrappers
PreintegratedCombinedMeasurements() { PreintegratedCombinedMeasurements() {
preintMeasCov_.setZero(); preintMeasCov_.setZero();
} }


@ -80,7 +80,7 @@ protected:
public: public:
/// Default constructor for serialization and Cython wrapper /// Default constructor for serialization and wrappers
PreintegratedImuMeasurements() { PreintegratedImuMeasurements() {
preintMeasCov_.setZero(); preintMeasCov_.setZero();
} }


@ -48,7 +48,7 @@ protected:
public: public:
/// Default constructor only for Cython wrapper /// Default constructor only for wrappers
Marginals(){} Marginals(){}
/** Construct a marginals class from a nonlinear factor graph. /** Construct a marginals class from a nonlinear factor graph.
@ -156,7 +156,7 @@ protected:
FastMap<Key, size_t> indices_; FastMap<Key, size_t> indices_;
public: public:
/// Default constructor only for Cython wrapper /// Default constructor only for wrappers
JointMarginal() {} JointMarginal() {}
/** Access a block, corresponding to a pair of variables, of the joint /** Access a block, corresponding to a pair of variables, of the joint


@ -200,6 +200,10 @@ boost::tuple<V, int> nonlinearConjugateGradient(const S &system,
currentValues = system.advance(prevValues, alpha, direction); currentValues = system.advance(prevValues, alpha, direction);
currentError = system.error(currentValues); currentError = system.error(currentValues);
// User hook:
if (params.iterationHook)
params.iterationHook(iteration, prevError, currentError);
// Maybe show output // Maybe show output
if (params.verbosity >= NonlinearOptimizerParams::ERROR) if (params.verbosity >= NonlinearOptimizerParams::ERROR)
std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl; std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl;


@ -88,20 +88,28 @@ void NonlinearOptimizer::defaultOptimize() {
} }
// Iterative loop // Iterative loop
double newError = currentError; // used to avoid repeated calls to error()
do { do {
// Do next iteration // Do next iteration
currentError = error(); // TODO(frank): don't do this twice at first !? Computed above! currentError = newError;
iterate(); iterate();
tictoc_finishedIteration(); tictoc_finishedIteration();
// Update newError for either printouts or conditional-end checks:
newError = error();
// User hook:
if (params.iterationHook)
params.iterationHook(iterations(), currentError, newError);
// Maybe show output // Maybe show output
if (params.verbosity >= NonlinearOptimizerParams::VALUES) if (params.verbosity >= NonlinearOptimizerParams::VALUES)
values().print("newValues"); values().print("newValues");
if (params.verbosity >= NonlinearOptimizerParams::ERROR) if (params.verbosity >= NonlinearOptimizerParams::ERROR)
cout << "newError: " << error() << endl; cout << "newError: " << newError << endl;
} while (iterations() < params.maxIterations && } while (iterations() < params.maxIterations &&
!checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, params.errorTol, !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, params.errorTol,
currentError, error(), params.verbosity) && std::isfinite(currentError)); currentError, newError, params.verbosity) && std::isfinite(currentError));
// Printing if verbose // Printing if verbose
if (params.verbosity >= NonlinearOptimizerParams::TERMINATION) { if (params.verbosity >= NonlinearOptimizerParams::TERMINATION) {


@ -81,7 +81,7 @@ protected:
public: public:
/** A shared pointer to this class */ /** A shared pointer to this class */
typedef boost::shared_ptr<const NonlinearOptimizer> shared_ptr; using shared_ptr = boost::shared_ptr<const NonlinearOptimizer>;
/// @name Standard interface /// @name Standard interface
/// @{ /// @{


@ -38,21 +38,12 @@ public:
SILENT, TERMINATION, ERROR, VALUES, DELTA, LINEAR SILENT, TERMINATION, ERROR, VALUES, DELTA, LINEAR
}; };
size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) size_t maxIterations = 100; ///< The maximum iterations to stop iterating (default 100)
double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) double relativeErrorTol = 1e-5; ///< The maximum relative error decrease to stop iterating (default 1e-5)
double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) double absoluteErrorTol = 1e-5; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
double errorTol; ///< The maximum total error to stop iterating (default 0.0) double errorTol = 0.0; ///< The maximum total error to stop iterating (default 0.0)
Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) Verbosity verbosity = SILENT; ///< The printing verbosity during optimization (default SILENT)
Ordering::OrderingType orderingType; ///< The method of ordering used during variable elimination (default COLAMD) Ordering::OrderingType orderingType = Ordering::COLAMD; ///< The method of ordering used during variable elimination (default COLAMD)
NonlinearOptimizerParams() :
maxIterations(100), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), errorTol(
0.0), verbosity(SILENT), orderingType(Ordering::COLAMD),
linearSolverType(MULTIFRONTAL_CHOLESKY) {}
virtual ~NonlinearOptimizerParams() {
}
virtual void print(const std::string& str = "") const;
size_t getMaxIterations() const { return maxIterations; } size_t getMaxIterations() const { return maxIterations; }
double getRelativeErrorTol() const { return relativeErrorTol; } double getRelativeErrorTol() const { return relativeErrorTol; }
@ -71,6 +62,37 @@ public:
static Verbosity verbosityTranslator(const std::string &s) ; static Verbosity verbosityTranslator(const std::string &s) ;
static std::string verbosityTranslator(Verbosity value) ; static std::string verbosityTranslator(Verbosity value) ;
/** Type for an optional user-provided hook to be called after each
* internal optimizer iteration. See iterationHook below. */
using IterationHook = std::function<
void(size_t /*iteration*/, double /*errorBefore*/, double /*errorAfter*/)>;
/** Optional user-provided iteration hook to be called after each
* optimization iteration (Default: none).
* Note that `IterationHook` is defined as a std::function<> with this
* signature:
* \code
* void(size_t iteration, double errorBefore, double errorAfter)
* \endcode
* which allows binding by means of a reference to a regular function:
* \code
* void foo(size_t iteration, double errorBefore, double errorAfter);
* // ...
* lmOpts.iterationHook = &foo;
* \endcode
* or to a C++11 lambda (preferred if you need to capture additional
* context variables, such as the optimizer object itself, the factor graph,
* etc.):
* \code
* lmOpts.iterationHook = [&](size_t iter, double oldError, double newError)
* {
* // ...
* };
* \endcode
* or to the result of a properly-formed `std::bind` call.
*/
IterationHook iterationHook;
/** See NonlinearOptimizerParams::linearSolverType */ /** See NonlinearOptimizerParams::linearSolverType */
enum LinearSolverType { enum LinearSolverType {
MULTIFRONTAL_CHOLESKY, MULTIFRONTAL_CHOLESKY,
@ -81,10 +103,16 @@ public:
CHOLMOD, /* Experimental Flag */ CHOLMOD, /* Experimental Flag */
}; };
LinearSolverType linearSolverType; ///< The type of linear solver to use in the nonlinear optimizer LinearSolverType linearSolverType = MULTIFRONTAL_CHOLESKY; ///< The type of linear solver to use in the nonlinear optimizer
boost::optional<Ordering> ordering; ///< The optional variable elimination ordering, or empty to use COLAMD (default: empty) boost::optional<Ordering> ordering; ///< The optional variable elimination ordering, or empty to use COLAMD (default: empty)
IterativeOptimizationParameters::shared_ptr iterativeParams; ///< The container for iterativeOptimization parameters; used in CG solvers. IterativeOptimizationParameters::shared_ptr iterativeParams; ///< The container for iterativeOptimization parameters; used in CG solvers.
NonlinearOptimizerParams() = default;
virtual ~NonlinearOptimizerParams() {
}
virtual void print(const std::string& str = "") const;
inline bool isMultifrontal() const { inline bool isMultifrontal() const {
return (linearSolverType == MULTIFRONTAL_CHOLESKY) return (linearSolverType == MULTIFRONTAL_CHOLESKY)
|| (linearSolverType == MULTIFRONTAL_QR); || (linearSolverType == MULTIFRONTAL_QR);
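Taken together, the in-class defaults and the new iterationHook field make per-iteration instrumentation a one-liner. The following is only a minimal sketch of a hypothetical caller: it assumes the usual LevenbergMarquardtParams/LevenbergMarquardtOptimizer pair and an already-built graph and initial estimate; only the NonlinearOptimizerParams members shown above come from this change.

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <iostream>

gtsam::Values optimizeWithHook(const gtsam::NonlinearFactorGraph& graph,
                               const gtsam::Values& initial) {
  gtsam::LevenbergMarquardtParams params;  // defaults now come from the in-class initializers
  params.maxIterations = 50;
  params.relativeErrorTol = 1e-6;
  // Called once per iteration with the error before and after the update.
  params.iterationHook = [](size_t iter, double errorBefore, double errorAfter) {
    std::cout << "iter " << iter << ": " << errorBefore << " -> " << errorAfter << "\n";
  };
  gtsam::LevenbergMarquardtOptimizer optimizer(graph, initial, params);
  return optimizer.optimize();
}

As the NonlinearOptimizer.cpp change above shows, the hook is invoked right before the optimizer's own verbosity printouts, so it can replace ad-hoc ERROR-level logging.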

View File

@ -41,7 +41,7 @@ struct Cal3Bundler0 : public Cal3Bundler {
double v0 = 0) double v0 = 0)
: Cal3Bundler(f, k1, k2, u0, v0) {} : Cal3Bundler(f, k1, k2, u0, v0) {}
Cal3Bundler0 retract(const Vector& d) const { Cal3Bundler0 retract(const Vector& d) const {
return Cal3Bundler0(fx() + d(0), k1() + d(1), k2() + d(2), u0(), v0()); return Cal3Bundler0(fx() + d(0), k1() + d(1), k2() + d(2), px(), py());
} }
Vector3 localCoordinates(const Cal3Bundler0& T2) const { Vector3 localCoordinates(const Cal3Bundler0& T2) const {
return T2.vector() - vector(); return T2.vector() - vector();

View File

@ -19,6 +19,7 @@
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/inference/Symbol.h> #include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h> #include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/factorTesting.h> #include <gtsam/nonlinear/factorTesting.h>
@ -115,16 +116,6 @@ TEST(FunctorizedFactor, Print) {
auto factor = auto factor =
MakeFunctorizedFactor<Matrix>(key, X, model, MultiplyFunctor(multiplier)); MakeFunctorizedFactor<Matrix>(key, X, model, MultiplyFunctor(multiplier));
// redirect output to buffer so we can compare
stringstream buffer;
streambuf *old = cout.rdbuf(buffer.rdbuf());
factor.print();
// get output string and reset stdout
string actual = buffer.str();
cout.rdbuf(old);
string expected = string expected =
" keys = { X0 }\n" " keys = { X0 }\n"
" noise model: unit (9) \n" " noise model: unit (9) \n"
@ -135,7 +126,7 @@ TEST(FunctorizedFactor, Print) {
"]\n" "]\n"
" noise model sigmas: 1 1 1 1 1 1 1 1 1\n"; " noise model sigmas: 1 1 1 1 1 1 1 1 1\n";
CHECK_EQUAL(expected, actual); EXPECT(assert_print_equal(expected, factor));
} }
/* ************************************************************************* */ /* ************************************************************************* */

View File

@ -595,15 +595,7 @@ TEST(Values, Demangle) {
values.insert(key1, v); values.insert(key1, v);
string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix<double, 1, 3, 1, 1, 3>)\n[\n 5, 6, 7\n]\n\n"; string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix<double, 1, 3, 1, 1, 3>)\n[\n 5, 6, 7\n]\n\n";
stringstream buffer; EXPECT(assert_print_equal(expected, values));
streambuf * old = cout.rdbuf(buffer.rdbuf());
values.print();
string actual = buffer.str();
cout.rdbuf(old);
EXPECT(assert_equal(expected, actual));
} }
/* ************************************************************************* */ /* ************************************************************************* */

View File

@ -111,10 +111,8 @@ void removeNodeFromGraph(const Key node,
graph.erase(node); graph.erase(node);
} }
MFAS::MFAS(const std::shared_ptr<vector<Key>>& nodes, MFAS::MFAS(const TranslationEdges& relativeTranslations,
const TranslationEdges& relativeTranslations, const Unit3& projectionDirection) {
const Unit3& projectionDirection)
: nodes_(nodes) {
// Iterate over edges, obtain weights by projecting // Iterate over edges, obtain weights by projecting
// their relativeTranslations along the projection direction // their relativeTranslations along the projection direction
for (const auto& measurement : relativeTranslations) { for (const auto& measurement : relativeTranslations) {
@ -123,8 +121,8 @@ MFAS::MFAS(const std::shared_ptr<vector<Key>>& nodes,
} }
} }
vector<Key> MFAS::computeOrdering() const { KeyVector MFAS::computeOrdering() const {
vector<Key> ordering; // Nodes in MFAS order (result). KeyVector ordering; // Nodes in MFAS order (result).
// A graph is an unordered map from keys to nodes. Each node contains a list // A graph is an unordered map from keys to nodes. Each node contains a list
// of its adjacent nodes. Create the graph from the edgeWeights. // of its adjacent nodes. Create the graph from the edgeWeights.
@ -142,7 +140,7 @@ vector<Key> MFAS::computeOrdering() const {
map<MFAS::KeyPair, double> MFAS::computeOutlierWeights() const { map<MFAS::KeyPair, double> MFAS::computeOutlierWeights() const {
// Find the ordering. // Find the ordering.
vector<Key> ordering = computeOrdering(); KeyVector ordering = computeOrdering();
// Create a map from the node key to its position in the ordering. This makes // Create a map from the node key to its position in the ordering. This makes
// it easier to lookup positions of different nodes. // it easier to lookup positions of different nodes.

View File

@ -56,50 +56,38 @@ class MFAS {
using TranslationEdges = std::vector<BinaryMeasurement<Unit3>>; using TranslationEdges = std::vector<BinaryMeasurement<Unit3>>;
private: private:
// pointer to nodes in the graph
const std::shared_ptr<std::vector<Key>> nodes_;
// edges with a direction such that all weights are positive // edges with a direction such that all weights are positive
// i.e, edges that originally had negative weights are flipped // i.e, edges that originally had negative weights are flipped
std::map<KeyPair, double> edgeWeights_; std::map<KeyPair, double> edgeWeights_;
public: public:
/** /**
* @brief Construct from the nodes in a graph and weighted directed edges * @brief Construct from the weighted directed edges
* between the nodes. Each node is identified by a Key. * between the nodes. Each node is identified by a Key.
* A shared pointer to the nodes is used as input parameter
* because, MFAS ordering is usually used to compute the ordering of a
* large graph that is already stored in memory. It is unnecessary to make a
* copy of the nodes in this class.
* @param nodes: Nodes in the graph
* @param edgeWeights: weights of edges in the graph * @param edgeWeights: weights of edges in the graph
*/ */
MFAS(const std::shared_ptr<std::vector<Key>> &nodes, MFAS(const std::map<KeyPair, double> &edgeWeights)
const std::map<KeyPair, double> &edgeWeights) : edgeWeights_(edgeWeights) {}
: nodes_(nodes), edgeWeights_(edgeWeights) {}
/** /**
* @brief Constructor to be used in the context of translation averaging. * @brief Constructor to be used in the context of translation averaging.
* Here, the nodes of the graph are cameras in 3D and the edges have a unit * Here, the nodes of the graph are cameras in 3D and the edges have a unit
* translation direction between them. The weights of the edges are computed by * translation direction between them. The weights of the edges are computed by
* projecting them along a projection direction. * projecting them along a projection direction.
* @param nodes cameras in the epipolar graph (each camera is identified by a
* Key)
* @param relativeTranslations translation directions between the cameras * @param relativeTranslations translation directions between the cameras
* @param projectionDirection direction in which edges are to be projected * @param projectionDirection direction in which edges are to be projected
*/ */
MFAS(const std::shared_ptr<std::vector<Key>> &nodes, MFAS(const TranslationEdges &relativeTranslations,
const TranslationEdges &relativeTranslations,
const Unit3 &projectionDirection); const Unit3 &projectionDirection);
/** /**
* @brief Computes the 1D MFAS ordering of nodes in the graph * @brief Computes the 1D MFAS ordering of nodes in the graph
* @return orderedNodes: vector of nodes in the obtained order * @return orderedNodes: vector of nodes in the obtained order
*/ */
std::vector<Key> computeOrdering() const; KeyVector computeOrdering() const;
/** /**
* @brief Computes the "outlier weights" of the graph. We define the outlier * @brief Computes the outlier weights of the graph. We define the outlier
* weight of an edge to be zero if the edge is an inlier and the magnitude of * weight of an edge to be zero if the edge is an inlier and the magnitude of
* its edgeWeight if it is an outlier. This function internally calls * its edgeWeight if it is an outlier. This function internally calls
* computeOrdering and uses the obtained ordering to identify outlier edges. * computeOrdering and uses the obtained ordering to identify outlier edges.
@ -108,4 +96,6 @@ class MFAS {
std::map<KeyPair, double> computeOutlierWeights() const; std::map<KeyPair, double> computeOutlierWeights() const;
}; };
typedef std::map<std::pair<Key, Key>, double> KeyPairDoubleMap;
} // namespace gtsam } // namespace gtsam
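For reference, a hypothetical caller of the slimmed-down interface (edge weights only, no separate node list) could look like the sketch below; the weights are made up and only the constructor, computeOrdering and computeOutlierWeights declared in this header are assumed.

#include <gtsam/sfm/MFAS.h>
#include <map>

gtsam::KeyVector orderNodes() {
  // Directed edges with positive projected weights (illustrative values only).
  std::map<gtsam::MFAS::KeyPair, double> edgeWeights;
  edgeWeights[{0, 1}] = 1.5;
  edgeWeights[{1, 2}] = 0.25;
  edgeWeights[{0, 2}] = 1.0;

  gtsam::MFAS mfas(edgeWeights);                      // nodes are implied by the edges
  gtsam::KeyVector ordering = mfas.computeOrdering();
  // Zero for edges consistent with the ordering, |weight| for the rest.
  std::map<gtsam::MFAS::KeyPair, double> outliers = mfas.computeOutlierWeights();
  return ordering;
}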

View File

@ -6,7 +6,7 @@
*/ */
#include <gtsam/sfm/MFAS.h> #include <gtsam/sfm/MFAS.h>
#include <iostream>
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
using namespace std; using namespace std;
@ -25,7 +25,7 @@ using namespace gtsam;
vector<MFAS::KeyPair> edges = {make_pair(3, 2), make_pair(0, 1), make_pair(3, 1), vector<MFAS::KeyPair> edges = {make_pair(3, 2), make_pair(0, 1), make_pair(3, 1),
make_pair(1, 2), make_pair(0, 2), make_pair(3, 0)}; make_pair(1, 2), make_pair(0, 2), make_pair(3, 0)};
// nodes in the graph // nodes in the graph
vector<Key> nodes = {Key(0), Key(1), Key(2), Key(3)}; KeyVector nodes = {Key(0), Key(1), Key(2), Key(3)};
// weights from projecting in direction-1 (bad direction, outlier accepted) // weights from projecting in direction-1 (bad direction, outlier accepted)
vector<double> weights1 = {2, 1.5, 0.5, 0.25, 1, 0.75}; vector<double> weights1 = {2, 1.5, 0.5, 0.25, 1, 0.75};
// weights from projecting in direction-2 (good direction, outlier rejected) // weights from projecting in direction-2 (good direction, outlier rejected)
@ -39,19 +39,18 @@ map<MFAS::KeyPair, double> getEdgeWeights(const vector<MFAS::KeyPair> &edges,
for (size_t i = 0; i < edges.size(); i++) { for (size_t i = 0; i < edges.size(); i++) {
edgeWeights[edges[i]] = weights[i]; edgeWeights[edges[i]] = weights[i];
} }
cout << "returning edge weights " << edgeWeights.size() << endl;
return edgeWeights; return edgeWeights;
} }
// test the ordering and the outlierWeights function using weights2 - outlier // test the ordering and the outlierWeights function using weights2 - outlier
// edge is rejected when projected in a direction that gives weights2 // edge is rejected when projected in a direction that gives weights2
TEST(MFAS, OrderingWeights2) { TEST(MFAS, OrderingWeights2) {
MFAS mfas_obj(make_shared<vector<Key>>(nodes), getEdgeWeights(edges, weights2)); MFAS mfas_obj(getEdgeWeights(edges, weights2));
vector<Key> ordered_nodes = mfas_obj.computeOrdering(); KeyVector ordered_nodes = mfas_obj.computeOrdering();
// ground truth (expected) ordering in this example // ground truth (expected) ordering in this example
vector<Key> gt_ordered_nodes = {0, 1, 3, 2}; KeyVector gt_ordered_nodes = {0, 1, 3, 2};
// check if the expected ordering is obtained // check if the expected ordering is obtained
for (size_t i = 0; i < ordered_nodes.size(); i++) { for (size_t i = 0; i < ordered_nodes.size(); i++) {
@ -76,12 +75,12 @@ TEST(MFAS, OrderingWeights2) {
// weights1 (outlier edge is accepted when projected in a direction that // weights1 (outlier edge is accepted when projected in a direction that
// produces weights1) // produces weights1)
TEST(MFAS, OrderingWeights1) { TEST(MFAS, OrderingWeights1) {
MFAS mfas_obj(make_shared<vector<Key>>(nodes), getEdgeWeights(edges, weights1)); MFAS mfas_obj(getEdgeWeights(edges, weights1));
vector<Key> ordered_nodes = mfas_obj.computeOrdering(); KeyVector ordered_nodes = mfas_obj.computeOrdering();
// "ground truth" expected ordering in this example // "ground truth" expected ordering in this example
vector<Key> gt_ordered_nodes = {3, 0, 1, 2}; KeyVector gt_ordered_nodes = {3, 0, 1, 2};
// check if the expected ordering is obtained // check if the expected ordering is obtained
for (size_t i = 0; i < ordered_nodes.size(); i++) { for (size_t i = 0; i < ordered_nodes.size(); i++) {

View File

@ -1164,8 +1164,8 @@ bool writeBAL(const string &filename, SfmData &data) {
for (size_t k = 0; k < track.number_measurements(); for (size_t k = 0; k < track.number_measurements();
k++) { // for each observation of the 3D point j k++) { // for each observation of the 3D point j
size_t i = track.measurements[k].first; // camera id size_t i = track.measurements[k].first; // camera id
double u0 = data.cameras[i].calibration().u0(); double u0 = data.cameras[i].calibration().px();
double v0 = data.cameras[i].calibration().v0(); double v0 = data.cameras[i].calibration().py();
if (u0 != 0 || v0 != 0) { if (u0 != 0 || v0 != 0) {
cout << "writeBAL has not been tested for calibration with nonzero " cout << "writeBAL has not been tested for calibration with nonzero "

View File

@ -211,16 +211,18 @@ GTSAM_EXPORT GraphAndValues load3D(const std::string& filename);
/// A measurement with its camera index /// A measurement with its camera index
typedef std::pair<size_t, Point2> SfmMeasurement; typedef std::pair<size_t, Point2> SfmMeasurement;
/// SfmTrack /// Sift index for SfmTrack
typedef std::pair<size_t, size_t> SiftIndex; typedef std::pair<size_t, size_t> SiftIndex;
/// Define the structure for the 3D points /// Define the structure for the 3D points
struct SfmTrack { struct SfmTrack {
SfmTrack(): p(0,0,0) {} SfmTrack(): p(0,0,0) {}
SfmTrack(const gtsam::Point3& pt) : p(pt) {}
Point3 p; ///< 3D position of the point Point3 p; ///< 3D position of the point
float r, g, b; ///< RGB color of the 3D point float r, g, b; ///< RGB color of the 3D point
std::vector<SfmMeasurement> measurements; ///< The 2D image projections (id,(u,v)) std::vector<SfmMeasurement> measurements; ///< The 2D image projections (id,(u,v))
std::vector<SiftIndex> siftIndices; std::vector<SiftIndex> siftIndices;
/// Total number of measurements in this track /// Total number of measurements in this track
size_t number_measurements() const { size_t number_measurements() const {
return measurements.size(); return measurements.size();
@ -233,11 +235,17 @@ struct SfmTrack {
SiftIndex siftIndex(size_t idx) const { SiftIndex siftIndex(size_t idx) const {
return siftIndices[idx]; return siftIndices[idx];
} }
Point3 point3() const { /// Get 3D point
const Point3& point3() const {
return p; return p;
} }
/// Add measurement (camera_idx, Point2) to track
void add_measurement(size_t idx, const gtsam::Point2& m) {
measurements.emplace_back(idx, m);
}
}; };
/// Define the structure for the camera poses /// Define the structure for the camera poses
typedef PinholeCamera<Cal3Bundler> SfmCamera; typedef PinholeCamera<Cal3Bundler> SfmCamera;
@ -260,6 +268,14 @@ struct SfmData {
SfmTrack track(size_t idx) const { SfmTrack track(size_t idx) const {
return tracks[idx]; return tracks[idx];
} }
/// Add a track to SfmData
void add_track(const SfmTrack& t) {
tracks.push_back(t);
}
/// Add a camera to SfmData
void add_camera(const SfmCamera& cam){
cameras.push_back(cam);
}
}; };
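The new add_camera / add_track / add_measurement helpers let callers assemble an SfmData by hand. Below is a minimal sketch with made-up values; the dataset.h header path and the PinholeCamera/Cal3Bundler constructors used here are assumptions on my part, not part of this change.

#include <gtsam/slam/dataset.h>

gtsam::SfmData makeTinyScene() {
  gtsam::SfmData data;

  // One camera with an identity pose and a simple Cal3Bundler calibration.
  gtsam::SfmCamera camera(gtsam::Pose3(), gtsam::Cal3Bundler(500.0, 0.0, 0.0));
  data.add_camera(camera);

  // One 3D point observed by camera 0 at pixel (100, 200).
  gtsam::SfmTrack track(gtsam::Point3(1.0, 2.0, 3.0));
  track.add_measurement(0, gtsam::Point2(100.0, 200.0));
  data.add_track(track);

  return data;
}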
/** /**

View File

@ -7,8 +7,3 @@ set (GTSAM_VERSION_STRING "@GTSAM_VERSION_STRING@")
set (GTSAM_USE_TBB @GTSAM_USE_TBB@) set (GTSAM_USE_TBB @GTSAM_USE_TBB@)
set (GTSAM_DEFAULT_ALLOCATOR @GTSAM_DEFAULT_ALLOCATOR@) set (GTSAM_DEFAULT_ALLOCATOR @GTSAM_DEFAULT_ALLOCATOR@)
if("@GTSAM_INSTALL_CYTHON_TOOLBOX@")
list(APPEND GTSAM_CYTHON_INSTALL_PATH "@GTSAM_CYTHON_INSTALL_PATH@")
list(APPEND GTSAM_EIGENCY_INSTALL_PATH "@GTSAM_EIGENCY_INSTALL_PATH@")
endif()

View File

@ -361,8 +361,12 @@ TEST(Similarity3, AlignPose3) {
vector<Pose3Pair> correspondences{bTa1, bTa2}; vector<Pose3Pair> correspondences{bTa1, bTa2};
// Cayley transform cannot accommodate 180 degree rotations,
// hence we only test for Expmap
#ifdef GTSAM_ROT3_EXPMAP
Similarity3 actual_aSb = Similarity3::Align(correspondences); Similarity3 actual_aSb = Similarity3::Align(correspondences);
EXPECT(assert_equal(expected_aSb, actual_aSb)); EXPECT(assert_equal(expected_aSb, actual_aSb));
#endif
} }
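A short note on the guard above (standard Cayley-transform algebra, not taken from this change): the Cayley chart writes a rotation as

\[ R = (I - \Omega)^{-1}(I + \Omega), \qquad \Omega^\top = -\Omega, \]

with inverse \( \Omega = (R - I)(I + R)^{-1} \). The inverse requires I + R to be invertible, so any rotation with -1 as an eigenvalue, i.e. every 180-degree rotation, lies outside the chart; the exponential map has no such restriction, which is why the assertion is only compiled under GTSAM_ROT3_EXPMAP.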
//****************************************************************************** //******************************************************************************

View File

@ -149,7 +149,7 @@ Template JacobianFactor::shared_ptr This::createDualFactor(
// to compute the least-square approximation of dual variables // to compute the least-square approximation of dual variables
return boost::make_shared<JacobianFactor>(Aterms, b); return boost::make_shared<JacobianFactor>(Aterms, b);
} else { } else {
return boost::make_shared<JacobianFactor>(); return nullptr;
} }
} }
@ -165,14 +165,13 @@ Template JacobianFactor::shared_ptr This::createDualFactor(
* if lambda = 0 you are on the constraint * if lambda = 0 you are on the constraint
* if lambda > 0 you are violating the constraint. * if lambda > 0 you are violating the constraint.
*/ */
Template GaussianFactorGraph::shared_ptr This::buildDualGraph( Template GaussianFactorGraph This::buildDualGraph(
const InequalityFactorGraph& workingSet, const VectorValues& delta) const { const InequalityFactorGraph& workingSet, const VectorValues& delta) const {
GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph()); GaussianFactorGraph dualGraph;
for (Key key : constrainedKeys_) { for (Key key : constrainedKeys_) {
// Each constrained key becomes a factor in the dual graph // Each constrained key becomes a factor in the dual graph
JacobianFactor::shared_ptr dualFactor = auto dualFactor = createDualFactor(key, workingSet, delta);
createDualFactor(key, workingSet, delta); if (dualFactor) dualGraph.push_back(dualFactor);
if (!dualFactor->empty()) dualGraph->push_back(dualFactor);
} }
return dualGraph; return dualGraph;
} }
@ -193,19 +192,16 @@ This::buildWorkingGraph(const InequalityFactorGraph& workingSet,
Template typename This::State This::iterate( Template typename This::State This::iterate(
const typename This::State& state) const { const typename This::State& state) const {
// Algorithm 16.3 from Nocedal06book. // Algorithm 16.3 from Nocedal06book.
// Solve with the current working set eqn 16.39, but instead of solving for p // Solve with the current working set eqn 16.39, but solve for x not p
// solve for x auto workingGraph = buildWorkingGraph(state.workingSet, state.values);
GaussianFactorGraph workingGraph =
buildWorkingGraph(state.workingSet, state.values);
VectorValues newValues = workingGraph.optimize(); VectorValues newValues = workingGraph.optimize();
// If we CAN'T move further // If we CAN'T move further
// if p_k = 0 is the original condition, modified by Duy to say that the state // if p_k = 0 is the original condition, modified by Duy to say that the state
// update is zero. // update is zero.
if (newValues.equals(state.values, 1e-7)) { if (newValues.equals(state.values, 1e-7)) {
// Compute lambda from the dual graph // Compute lambda from the dual graph
GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet, auto dualGraph = buildDualGraph(state.workingSet, newValues);
newValues); VectorValues duals = dualGraph.optimize();
VectorValues duals = dualGraph->optimize();
int leavingFactor = identifyLeavingConstraint(state.workingSet, duals); int leavingFactor = identifyLeavingConstraint(state.workingSet, duals);
// If all inequality constraints are satisfied: We have the solution!! // If all inequality constraints are satisfied: We have the solution!!
if (leavingFactor < 0) { if (leavingFactor < 0) {

View File

@ -154,8 +154,8 @@ protected:
public: /// Just for testing... public: /// Just for testing...
/// Builds a dual graph from the current working set. /// Builds a dual graph from the current working set.
GaussianFactorGraph::shared_ptr buildDualGraph( GaussianFactorGraph buildDualGraph(const InequalityFactorGraph &workingSet,
const InequalityFactorGraph& workingSet, const VectorValues& delta) const; const VectorValues &delta) const;
/** /**
* Build a working graph of cost, equality and active inequality constraints * Build a working graph of cost, equality and active inequality constraints

View File

@ -31,6 +31,11 @@ class EqualityFactorGraph: public FactorGraph<LinearEquality> {
public: public:
typedef boost::shared_ptr<EqualityFactorGraph> shared_ptr; typedef boost::shared_ptr<EqualityFactorGraph> shared_ptr;
/// Add a linear inequality, forwards arguments to LinearInequality.
template <class... Args> void add(Args &&... args) {
emplace_shared<LinearEquality>(std::forward<Args>(args)...);
}
/// Compute error of a guess. /// Compute error of a guess.
double error(const VectorValues& x) const { double error(const VectorValues& x) const {
double total_error = 0.; double total_error = 0.;

View File

@ -47,6 +47,11 @@ public:
return Base::equals(other, tol); return Base::equals(other, tol);
} }
/// Add a linear inequality, forwards arguments to LinearInequality.
template <class... Args> void add(Args &&... args) {
emplace_shared<LinearInequality>(std::forward<Args>(args)...);
}
/** /**
* Compute error of a guess. * Compute error of a guess.
* Infinity error if it violates an inequality; zero otherwise. */ * Infinity error if it violates an inequality; zero otherwise. */
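Both new add() helpers simply forward their arguments to the corresponding constrained factor's constructor, so call sites shrink from push_back(LinearInequality(...)) to a single call; the rewritten tests below rely on this. A small standalone sketch mirroring those calls (keys and coefficients are illustrative only):

#include <gtsam_unstable/linear/EqualityFactorGraph.h>
#include <gtsam_unstable/linear/InequalityFactorGraph.h>

void buildConstraints() {
  gtsam::InequalityFactorGraph inequalities;
  // Forwards to LinearInequality(key, coefficients, b, dualKey): x1 >= 0.
  inequalities.add(1, gtsam::Vector2(-1, 0), 0, 1);

  gtsam::EqualityFactorGraph equalities;
  // Forwards to LinearEquality(key1, A1, key2, A2, b, dualKey): x1 + x2 = 1,
  // written as x1 + x2 - 1 = 0, hence the negated right-hand side
  // (same convention as createEqualityConstrainedTest() below).
  const gtsam::Matrix A = gtsam::I_1x1;
  const gtsam::Vector b = -gtsam::Vector::Ones(1);
  equalities.add(1, A, 2, A, b, 0);
}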

View File

@ -21,7 +21,6 @@
#include <gtsam_unstable/linear/LP.h> #include <gtsam_unstable/linear/LP.h>
#include <gtsam/linear/GaussianFactorGraph.h> #include <gtsam/linear/GaussianFactorGraph.h>
#include <CppUnitLite/Test.h>
namespace gtsam { namespace gtsam {
/** /**
@ -83,7 +82,7 @@ private:
const InequalityFactorGraph& inequalities) const; const InequalityFactorGraph& inequalities) const;
// friend class for unit-testing private methods // friend class for unit-testing private methods
FRIEND_TEST(LPInitSolver, initialization); friend class LPInitSolverInitializationTest;
}; };
} }

View File

@ -16,21 +16,23 @@
* @author Duy-Nguyen Ta * @author Duy-Nguyen Ta
*/ */
#include <gtsam_unstable/linear/LPInitSolver.h>
#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/FactorGraph-inst.h> #include <gtsam/inference/FactorGraph-inst.h>
#include <gtsam/linear/VectorValues.h> #include <gtsam/inference/Symbol.h>
#include <gtsam/linear/GaussianFactorGraph.h> #include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam_unstable/linear/EqualityFactorGraph.h> #include <gtsam_unstable/linear/EqualityFactorGraph.h>
#include <gtsam_unstable/linear/InequalityFactorGraph.h> #include <gtsam_unstable/linear/InequalityFactorGraph.h>
#include <gtsam_unstable/linear/InfeasibleInitialValues.h> #include <gtsam_unstable/linear/InfeasibleInitialValues.h>
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
#include <boost/foreach.hpp> #include <boost/foreach.hpp>
#include <boost/range/adaptor/map.hpp> #include <boost/range/adaptor/map.hpp>
#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam_unstable/linear/LPInitSolver.h>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
using namespace gtsam::symbol_shorthand; using namespace gtsam::symbol_shorthand;
@ -47,37 +49,27 @@ static const Vector kOne = Vector::Ones(1), kZero = Vector::Zero(1);
*/ */
LP simpleLP1() { LP simpleLP1() {
LP lp; LP lp;
lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2) lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2)
lp.inequalities.push_back( lp.inequalities.add(1, Vector2(-1, 0), 0, 1); // x1 >= 0
LinearInequality(1, Vector2(-1, 0), 0, 1)); // x1 >= 0 lp.inequalities.add(1, Vector2(0, -1), 0, 2); // x2 >= 0
lp.inequalities.push_back( lp.inequalities.add(1, Vector2(1, 2), 4, 3); // x1 + 2*x2 <= 4
LinearInequality(1, Vector2(0, -1), 0, 2)); // x2 >= 0 lp.inequalities.add(1, Vector2(4, 2), 12, 4); // 4x1 + 2x2 <= 12
lp.inequalities.push_back( lp.inequalities.add(1, Vector2(-1, 1), 1, 5); // -x1 + x2 <= 1
LinearInequality(1, Vector2(1, 2), 4, 3)); // x1 + 2*x2 <= 4
lp.inequalities.push_back(
LinearInequality(1, Vector2(4, 2), 12, 4)); // 4x1 + 2x2 <= 12
lp.inequalities.push_back(
LinearInequality(1, Vector2(-1, 1), 1, 5)); // -x1 + x2 <= 1
return lp; return lp;
} }
/* ************************************************************************* */ /* ************************************************************************* */
namespace gtsam { namespace gtsam {
TEST(LPInitSolver, infinite_loop_single_var) { TEST(LPInitSolver, InfiniteLoopSingleVar) {
LP initchecker; LP lp;
initchecker.cost = LinearCost(1, Vector3(0, 0, 1)); // min alpha lp.cost = LinearCost(1, Vector3(0, 0, 1)); // min alpha
initchecker.inequalities.push_back( lp.inequalities.add(1, Vector3(-2, -1, -1), -2, 1); //-2x-y-a <= -2
LinearInequality(1, Vector3(-2, -1, -1), -2, 1)); //-2x-y-alpha <= -2 lp.inequalities.add(1, Vector3(-1, 2, -1), 6, 2); // -x+2y-a <= 6
initchecker.inequalities.push_back( lp.inequalities.add(1, Vector3(-1, 0, -1), 0, 3); // -x - a <= 0
LinearInequality(1, Vector3(-1, 2, -1), 6, 2)); // -x+2y-alpha <= 6 lp.inequalities.add(1, Vector3(1, 0, -1), 20, 4); // x - a <= 20
initchecker.inequalities.push_back( lp.inequalities.add(1, Vector3(0, -1, -1), 0, 5); // -y - a <= 0
LinearInequality(1, Vector3(-1, 0, -1), 0, 3)); // -x - alpha <= 0 LPSolver solver(lp);
initchecker.inequalities.push_back(
LinearInequality(1, Vector3(1, 0, -1), 20, 4)); // x - alpha <= 20
initchecker.inequalities.push_back(
LinearInequality(1, Vector3(0, -1, -1), 0, 5)); // -y - alpha <= 0
LPSolver solver(initchecker);
VectorValues starter; VectorValues starter;
starter.insert(1, Vector3(0, 0, 2)); starter.insert(1, Vector3(0, 0, 2));
VectorValues results, duals; VectorValues results, duals;
@ -87,25 +79,23 @@ TEST(LPInitSolver, infinite_loop_single_var) {
CHECK(assert_equal(results, expected, 1e-7)); CHECK(assert_equal(results, expected, 1e-7));
} }
TEST(LPInitSolver, infinite_loop_multi_var) { TEST(LPInitSolver, InfiniteLoopMultiVar) {
LP initchecker; LP lp;
Key X = symbol('X', 1); Key X = symbol('X', 1);
Key Y = symbol('Y', 1); Key Y = symbol('Y', 1);
Key Z = symbol('Z', 1); Key Z = symbol('Z', 1);
initchecker.cost = LinearCost(Z, kOne); // min alpha lp.cost = LinearCost(Z, kOne); // min alpha
initchecker.inequalities.push_back( lp.inequalities.add(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2,
LinearInequality(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2, 1); //-2x-y-alpha <= -2
1)); //-2x-y-alpha <= -2 lp.inequalities.add(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6,
initchecker.inequalities.push_back( 2); // -x+2y-alpha <= 6
LinearInequality(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6, lp.inequalities.add(X, -1.0 * kOne, Z, -1.0 * kOne, 0,
2)); // -x+2y-alpha <= 6 3); // -x - alpha <= 0
initchecker.inequalities.push_back(LinearInequality( lp.inequalities.add(X, 1.0 * kOne, Z, -1.0 * kOne, 20,
X, -1.0 * kOne, Z, -1.0 * kOne, 0, 3)); // -x - alpha <= 0 4); // x - alpha <= 20
initchecker.inequalities.push_back(LinearInequality( lp.inequalities.add(Y, -1.0 * kOne, Z, -1.0 * kOne, 0,
X, 1.0 * kOne, Z, -1.0 * kOne, 20, 4)); // x - alpha <= 20 5); // -y - alpha <= 0
initchecker.inequalities.push_back(LinearInequality( LPSolver solver(lp);
Y, -1.0 * kOne, Z, -1.0 * kOne, 0, 5)); // -y - alpha <= 0
LPSolver solver(initchecker);
VectorValues starter; VectorValues starter;
starter.insert(X, kZero); starter.insert(X, kZero);
starter.insert(Y, kZero); starter.insert(Y, kZero);
@ -119,7 +109,7 @@ TEST(LPInitSolver, infinite_loop_multi_var) {
CHECK(assert_equal(results, expected, 1e-7)); CHECK(assert_equal(results, expected, 1e-7));
} }
TEST(LPInitSolver, initialization) { TEST(LPInitSolver, Initialization) {
LP lp = simpleLP1(); LP lp = simpleLP1();
LPInitSolver initSolver(lp); LPInitSolver initSolver(lp);
@ -138,19 +128,19 @@ TEST(LPInitSolver, initialization) {
LP::shared_ptr initLP = initSolver.buildInitialLP(yKey); LP::shared_ptr initLP = initSolver.buildInitialLP(yKey);
LP expectedInitLP; LP expectedInitLP;
expectedInitLP.cost = LinearCost(yKey, kOne); expectedInitLP.cost = LinearCost(yKey, kOne);
expectedInitLP.inequalities.push_back(LinearInequality( expectedInitLP.inequalities.add(1, Vector2(-1, 0), 2, Vector::Constant(1, -1),
1, Vector2(-1, 0), 2, Vector::Constant(1, -1), 0, 1)); // -x1 - y <= 0 0, 1); // -x1 - y <= 0
expectedInitLP.inequalities.push_back(LinearInequality( expectedInitLP.inequalities.add(1, Vector2(0, -1), 2, Vector::Constant(1, -1),
1, Vector2(0, -1), 2, Vector::Constant(1, -1), 0, 2)); // -x2 - y <= 0 0, 2); // -x2 - y <= 0
expectedInitLP.inequalities.push_back( expectedInitLP.inequalities.add(1, Vector2(1, 2), 2, Vector::Constant(1, -1),
LinearInequality(1, Vector2(1, 2), 2, Vector::Constant(1, -1), 4, 4,
3)); // x1 + 2*x2 - y <= 4 3); // x1 + 2*x2 - y <= 4
expectedInitLP.inequalities.push_back( expectedInitLP.inequalities.add(1, Vector2(4, 2), 2, Vector::Constant(1, -1),
LinearInequality(1, Vector2(4, 2), 2, Vector::Constant(1, -1), 12, 12,
4)); // 4x1 + 2x2 - y <= 12 4); // 4x1 + 2x2 - y <= 12
expectedInitLP.inequalities.push_back( expectedInitLP.inequalities.add(1, Vector2(-1, 1), 2, Vector::Constant(1, -1),
LinearInequality(1, Vector2(-1, 1), 2, Vector::Constant(1, -1), 1, 1,
5)); // -x1 + x2 - y <= 1 5); // -x1 + x2 - y <= 1
CHECK(assert_equal(expectedInitLP, *initLP, 1e-10)); CHECK(assert_equal(expectedInitLP, *initLP, 1e-10));
LPSolver lpSolveInit(*initLP); LPSolver lpSolveInit(*initLP);
VectorValues xy0(x0); VectorValues xy0(x0);
@ -164,7 +154,7 @@ TEST(LPInitSolver, initialization) {
VectorValues x = initSolver.solve(); VectorValues x = initSolver.solve();
CHECK(lp.isFeasible(x)); CHECK(lp.isFeasible(x));
} }
} } // namespace gtsam
/* ************************************************************************* */ /* ************************************************************************* */
/** /**
@ -173,28 +163,24 @@ TEST(LPInitSolver, initialization) {
* x - y = 5 * x - y = 5
* x + 2y = 6 * x + 2y = 6
*/ */
TEST(LPSolver, overConstrainedLinearSystem) { TEST(LPSolver, OverConstrainedLinearSystem) {
GaussianFactorGraph graph; GaussianFactorGraph graph;
Matrix A1 = Vector3(1, 1, 1); Matrix A1 = Vector3(1, 1, 1);
Matrix A2 = Vector3(1, -1, 2); Matrix A2 = Vector3(1, -1, 2);
Vector b = Vector3(1, 5, 6); Vector b = Vector3(1, 5, 6);
JacobianFactor factor(1, A1, 2, A2, b, noiseModel::Constrained::All(3)); graph.add(1, A1, 2, A2, b, noiseModel::Constrained::All(3));
graph.push_back(factor);
VectorValues x = graph.optimize(); VectorValues x = graph.optimize();
// This check confirms that gtsam linear constraint solver can't handle // This check confirms that gtsam linear constraint solver can't handle
// over-constrained system // over-constrained system
CHECK(factor.error(x) != 0.0); CHECK(graph[0]->error(x) != 0.0);
} }
TEST(LPSolver, overConstrainedLinearSystem2) { TEST(LPSolver, overConstrainedLinearSystem2) {
GaussianFactorGraph graph; GaussianFactorGraph graph;
graph.emplace_shared<JacobianFactor>(1, I_1x1, 2, I_1x1, kOne, graph.add(1, I_1x1, 2, I_1x1, kOne, noiseModel::Constrained::All(1));
noiseModel::Constrained::All(1)); graph.add(1, I_1x1, 2, -I_1x1, 5 * kOne, noiseModel::Constrained::All(1));
graph.emplace_shared<JacobianFactor>(1, I_1x1, 2, -I_1x1, 5 * kOne, graph.add(1, I_1x1, 2, 2 * I_1x1, 6 * kOne, noiseModel::Constrained::All(1));
noiseModel::Constrained::All(1));
graph.emplace_shared<JacobianFactor>(1, I_1x1, 2, 2 * I_1x1, 6 * kOne,
noiseModel::Constrained::All(1));
VectorValues x = graph.optimize(); VectorValues x = graph.optimize();
// This check confirms that gtsam linear constraint solver can't handle // This check confirms that gtsam linear constraint solver can't handle
// over-constrained system // over-constrained system
@ -202,7 +188,7 @@ TEST(LPSolver, overConstrainedLinearSystem2) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(LPSolver, simpleTest1) { TEST(LPSolver, SimpleTest1) {
LP lp = simpleLP1(); LP lp = simpleLP1();
LPSolver lpSolver(lp); LPSolver lpSolver(lp);
VectorValues init; VectorValues init;
@ -222,7 +208,7 @@ TEST(LPSolver, simpleTest1) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(LPSolver, testWithoutInitialValues) { TEST(LPSolver, TestWithoutInitialValues) {
LP lp = simpleLP1(); LP lp = simpleLP1();
LPSolver lpSolver(lp); LPSolver lpSolver(lp);
VectorValues result, duals, expectedResult; VectorValues result, duals, expectedResult;

View File

@ -17,10 +17,12 @@
* @author Ivan Dario Jimenez * @author Ivan Dario Jimenez
*/ */
#include <gtsam_unstable/linear/QPSParser.h>
#include <gtsam_unstable/linear/QPSolver.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/inference/Symbol.h> #include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/linear/QPSolver.h>
#include <gtsam_unstable/linear/QPSParser.h>
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
using namespace std; using namespace std;
@ -40,15 +42,15 @@ QP createTestCase() {
// Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10 // Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10
//TODO: THIS TEST MIGHT BE WRONG : the last parameter might be 5 instead of 10 because the form of the equation //TODO: THIS TEST MIGHT BE WRONG : the last parameter might be 5 instead of 10 because the form of the equation
// Should be 0.5x'Gx + gx + f : Nocedal 449 // Should be 0.5x'Gx + gx + f : Nocedal 449
qp.cost.push_back( qp.cost.push_back(HessianFactor(X(1), X(2), 2.0 * I_1x1, -I_1x1, 3.0 * I_1x1,
HessianFactor(X(1), X(2), 2.0 * I_1x1, -I_1x1, 3.0 * I_1x1, 2.0 * I_1x1, 2.0 * I_1x1, Z_1x1, 10.0));
Z_1x1, 10.0));
// Inequality constraints // Inequality constraints
qp.inequalities.push_back(LinearInequality(X(1), I_1x1, X(2), I_1x1, 2, 0)); // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 qp.inequalities.add(X(1), I_1x1, X(2), I_1x1, 2,
qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0, 1)); // -x1 <= 0 0); // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2
qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0, 2)); // -x2 <= 0 qp.inequalities.add(X(1), -I_1x1, 0, 1); // -x1 <= 0
qp.inequalities.push_back(LinearInequality(X(1), I_1x1, 1.5, 3)); // x1 <= 3/2 qp.inequalities.add(X(2), -I_1x1, 0, 2); // -x2 <= 0
qp.inequalities.add(X(1), I_1x1, 1.5, 3); // x1 <= 3/2
return qp; return qp;
} }
@ -94,16 +96,15 @@ QP createEqualityConstrainedTest() {
// Note the Hessian encodes: // Note the Hessian encodes:
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
// Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0 // Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0
qp.cost.push_back( qp.cost.push_back(HessianFactor(X(1), X(2), 2.0 * I_1x1, Z_1x1, Z_1x1,
HessianFactor(X(1), X(2), 2.0 * I_1x1, Z_1x1, Z_1x1, 2.0 * I_1x1, Z_1x1, 2.0 * I_1x1, Z_1x1, 0.0));
0.0));
// Equality constraints // Equality constraints
// x1 + x2 = 1 --> x1 + x2 -1 = 0, hence we negate the b vector // x1 + x2 = 1 --> x1 + x2 -1 = 0, hence we negate the b vector
Matrix A1 = I_1x1; Matrix A1 = I_1x1;
Matrix A2 = I_1x1; Matrix A2 = I_1x1;
Vector b = -kOne; Vector b = -kOne;
qp.equalities.push_back(LinearEquality(X(1), A1, X(2), A2, b, 0)); qp.equalities.add(X(1), A1, X(2), A2, b, 0);
return qp; return qp;
} }
@ -118,9 +119,8 @@ TEST(QPSolver, dual) {
QPSolver solver(qp); QPSolver solver(qp);
GaussianFactorGraph::shared_ptr dualGraph = solver.buildDualGraph( auto dualGraph = solver.buildDualGraph(qp.inequalities, initialValues);
qp.inequalities, initialValues); VectorValues dual = dualGraph.optimize();
VectorValues dual = dualGraph->optimize();
VectorValues expectedDual; VectorValues expectedDual;
expectedDual.insert(0, (Vector(1) << 2.0).finished()); expectedDual.insert(0, (Vector(1) << 2.0).finished());
CHECK(assert_equal(expectedDual, dual, 1e-10)); CHECK(assert_equal(expectedDual, dual, 1e-10));
@ -135,19 +135,19 @@ TEST(QPSolver, indentifyActiveConstraints) {
currentSolution.insert(X(1), Z_1x1); currentSolution.insert(X(1), Z_1x1);
currentSolution.insert(X(2), Z_1x1); currentSolution.insert(X(2), Z_1x1);
InequalityFactorGraph workingSet = solver.identifyActiveConstraints( auto workingSet =
qp.inequalities, currentSolution); solver.identifyActiveConstraints(qp.inequalities, currentSolution);
CHECK(!workingSet.at(0)->active()); // inactive CHECK(!workingSet.at(0)->active()); // inactive
CHECK(workingSet.at(1)->active());// active CHECK(workingSet.at(1)->active()); // active
CHECK(workingSet.at(2)->active());// active CHECK(workingSet.at(2)->active()); // active
CHECK(!workingSet.at(3)->active());// inactive CHECK(!workingSet.at(3)->active()); // inactive
VectorValues solution = solver.buildWorkingGraph(workingSet).optimize(); VectorValues solution = solver.buildWorkingGraph(workingSet).optimize();
VectorValues expectedSolution; VectorValues expected;
expectedSolution.insert(X(1), kZero); expected.insert(X(1), kZero);
expectedSolution.insert(X(2), kZero); expected.insert(X(2), kZero);
CHECK(assert_equal(expectedSolution, solution, 1e-100)); CHECK(assert_equal(expected, solution, 1e-100));
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -159,24 +159,24 @@ TEST(QPSolver, iterate) {
currentSolution.insert(X(1), Z_1x1); currentSolution.insert(X(1), Z_1x1);
currentSolution.insert(X(2), Z_1x1); currentSolution.insert(X(2), Z_1x1);
std::vector<VectorValues> expectedSolutions(4), expectedDuals(4); std::vector<VectorValues> expected(4), expectedDuals(4);
expectedSolutions[0].insert(X(1), kZero); expected[0].insert(X(1), kZero);
expectedSolutions[0].insert(X(2), kZero); expected[0].insert(X(2), kZero);
expectedDuals[0].insert(1, (Vector(1) << 3).finished()); expectedDuals[0].insert(1, (Vector(1) << 3).finished());
expectedDuals[0].insert(2, kZero); expectedDuals[0].insert(2, kZero);
expectedSolutions[1].insert(X(1), (Vector(1) << 1.5).finished()); expected[1].insert(X(1), (Vector(1) << 1.5).finished());
expectedSolutions[1].insert(X(2), kZero); expected[1].insert(X(2), kZero);
expectedDuals[1].insert(3, (Vector(1) << 1.5).finished()); expectedDuals[1].insert(3, (Vector(1) << 1.5).finished());
expectedSolutions[2].insert(X(1), (Vector(1) << 1.5).finished()); expected[2].insert(X(1), (Vector(1) << 1.5).finished());
expectedSolutions[2].insert(X(2), (Vector(1) << 0.75).finished()); expected[2].insert(X(2), (Vector(1) << 0.75).finished());
expectedSolutions[3].insert(X(1), (Vector(1) << 1.5).finished()); expected[3].insert(X(1), (Vector(1) << 1.5).finished());
expectedSolutions[3].insert(X(2), (Vector(1) << 0.5).finished()); expected[3].insert(X(2), (Vector(1) << 0.5).finished());
InequalityFactorGraph workingSet = solver.identifyActiveConstraints( auto workingSet =
qp.inequalities, currentSolution); solver.identifyActiveConstraints(qp.inequalities, currentSolution);
QPSolver::State state(currentSolution, VectorValues(), workingSet, false, QPSolver::State state(currentSolution, VectorValues(), workingSet, false,
100); 100);
@ -188,12 +188,12 @@ TEST(QPSolver, iterate) {
// Forst10book do not follow exactly what we implemented from Nocedal06book. // Forst10book do not follow exactly what we implemented from Nocedal06book.
// Specifically, we do not re-identify active constraints and // Specifically, we do not re-identify active constraints and
// do not recompute dual variables after every step!!! // do not recompute dual variables after every step!!!
// CHECK(assert_equal(expectedSolutions[it], state.values, 1e-10)); // CHECK(assert_equal(expected[it], state.values, 1e-10));
// CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10)); // CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10));
it++; it++;
} }
CHECK(assert_equal(expectedSolutions[3], state.values, 1e-10)); CHECK(assert_equal(expected[3], state.values, 1e-10));
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -204,182 +204,161 @@ TEST(QPSolver, optimizeForst10book_pg171Ex5) {
VectorValues initialValues; VectorValues initialValues;
initialValues.insert(X(1), Z_1x1); initialValues.insert(X(1), Z_1x1);
initialValues.insert(X(2), Z_1x1); initialValues.insert(X(2), Z_1x1);
VectorValues solution; VectorValues solution = solver.optimize(initialValues).first;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); VectorValues expected;
VectorValues expectedSolution; expected.insert(X(1), (Vector(1) << 1.5).finished());
expectedSolution.insert(X(1), (Vector(1) << 1.5).finished()); expected.insert(X(2), (Vector(1) << 0.5).finished());
expectedSolution.insert(X(2), (Vector(1) << 0.5).finished()); CHECK(assert_equal(expected, solution, 1e-100));
CHECK(assert_equal(expectedSolution, solution, 1e-100));
} }
pair<QP, QP> testParser(QPSParser parser) { pair<QP, QP> testParser(QPSParser parser) {
QP exampleqp = parser.Parse(); QP exampleqp = parser.Parse();
QP expectedqp; QP expected;
Key X1(Symbol('X', 1)), X2(Symbol('X', 2)); Key X1(Symbol('X', 1)), X2(Symbol('X', 2));
// min f(x,y) = 4 + 1.5x -y + 0.58x^2 + 2xy + 2yx + 10y^2 // min f(x,y) = 4 + 1.5x -y + 0.58x^2 + 2xy + 2yx + 10y^2
expectedqp.cost.push_back( expected.cost.push_back(HessianFactor(X1, X2, 8.0 * I_1x1, 2.0 * I_1x1,
HessianFactor(X1, X2, 8.0 * I_1x1, 2.0 * I_1x1, -1.5 * kOne, 10.0 * I_1x1, -1.5 * kOne, 10.0 * I_1x1, 2.0 * kOne,
2.0 * kOne, 8.0)); 8.0));
// 2x + y >= 2
// -x + 2y <= 6 expected.inequalities.add(X1, -2.0 * I_1x1, X2, -I_1x1, -2, 0); // 2x + y >= 2
expectedqp.inequalities.push_back( expected.inequalities.add(X1, -I_1x1, X2, 2.0 * I_1x1, 6, 1); // -x + 2y <= 6
LinearInequality(X1, -2.0 * I_1x1, X2, -I_1x1, -2, 0)); expected.inequalities.add(X1, I_1x1, 20, 4); // x<= 20
expectedqp.inequalities.push_back( expected.inequalities.add(X1, -I_1x1, 0, 2); // x >= 0
LinearInequality(X1, -I_1x1, X2, 2.0 * I_1x1, 6, 1)); expected.inequalities.add(X2, -I_1x1, 0, 3); // y > = 0
// x<= 20 return {expected, exampleqp};
expectedqp.inequalities.push_back(LinearInequality(X1, I_1x1, 20, 4)); };
//x >= 0
expectedqp.inequalities.push_back(LinearInequality(X1, -I_1x1, 0, 2));
// y > = 0
expectedqp.inequalities.push_back(LinearInequality(X2, -I_1x1, 0, 3));
return std::make_pair(expectedqp, exampleqp);
}
;
TEST(QPSolver, ParserSyntaticTest) { TEST(QPSolver, ParserSyntaticTest) {
auto expectedActual = testParser(QPSParser("QPExample.QPS")); auto result = testParser(QPSParser("QPExample.QPS"));
CHECK(assert_equal(expectedActual.first.cost, expectedActual.second.cost, CHECK(assert_equal(result.first.cost, result.second.cost, 1e-7));
CHECK(assert_equal(result.first.inequalities, result.second.inequalities,
1e-7)); 1e-7));
CHECK(assert_equal(expectedActual.first.inequalities, CHECK(assert_equal(result.first.equalities, result.second.equalities, 1e-7));
expectedActual.second.inequalities, 1e-7));
CHECK(assert_equal(expectedActual.first.equalities,
expectedActual.second.equalities, 1e-7));
} }
TEST(QPSolver, ParserSemanticTest) { TEST(QPSolver, ParserSemanticTest) {
auto expected_actual = testParser(QPSParser("QPExample.QPS")); auto result = testParser(QPSParser("QPExample.QPS"));
VectorValues actualSolution, expectedSolution; VectorValues expected = QPSolver(result.first).optimize().first;
boost::tie(expectedSolution, boost::tuples::ignore) = VectorValues actual = QPSolver(result.second).optimize().first;
QPSolver(expected_actual.first).optimize(); CHECK(assert_equal(actual, expected, 1e-7));
boost::tie(actualSolution, boost::tuples::ignore) =
QPSolver(expected_actual.second).optimize();
CHECK(assert_equal(actualSolution, expectedSolution, 1e-7));
} }
TEST(QPSolver, QPExampleTest){ TEST(QPSolver, QPExampleTest) {
QP problem = QPSParser("QPExample.QPS").Parse(); QP problem = QPSParser("QPExample.QPS").Parse();
VectorValues actualSolution;
auto solver = QPSolver(problem); auto solver = QPSolver(problem);
boost::tie(actualSolution, boost::tuples::ignore) = solver.optimize(); VectorValues actual = solver.optimize().first;
VectorValues expectedSolution; VectorValues expected;
expectedSolution.insert(Symbol('X',1),0.7625*I_1x1); expected.insert(Symbol('X', 1), 0.7625 * I_1x1);
expectedSolution.insert(Symbol('X',2),0.4750*I_1x1); expected.insert(Symbol('X', 2), 0.4750 * I_1x1);
double error_expected = problem.cost.error(expectedSolution); double error_expected = problem.cost.error(expected);
double error_actual = problem.cost.error(actualSolution); double error_actual = problem.cost.error(actual);
CHECK(assert_equal(expectedSolution, actualSolution, 1e-7)) CHECK(assert_equal(expected, actual, 1e-7))
CHECK(assert_equal(error_expected, error_actual)) CHECK(assert_equal(error_expected, error_actual))
} }
TEST(QPSolver, HS21) { TEST(QPSolver, HS21) {
QP problem = QPSParser("HS21.QPS").Parse(); QP problem = QPSParser("HS21.QPS").Parse();
VectorValues actualSolution; VectorValues expected;
VectorValues expectedSolution; expected.insert(Symbol('X', 1), 2.0 * I_1x1);
expectedSolution.insert(Symbol('X',1), 2.0*I_1x1); expected.insert(Symbol('X', 2), 0.0 * I_1x1);
expectedSolution.insert(Symbol('X',2), 0.0*I_1x1); VectorValues actual = QPSolver(problem).optimize().first;
boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); double error_actual = problem.cost.error(actual);
double error_actual = problem.cost.error(actualSolution);
CHECK(assert_equal(-99.9599999, error_actual, 1e-7)) CHECK(assert_equal(-99.9599999, error_actual, 1e-7))
CHECK(assert_equal(expectedSolution, actualSolution)) CHECK(assert_equal(expected, actual))
} }
TEST(QPSolver, HS35) { TEST(QPSolver, HS35) {
QP problem = QPSParser("HS35.QPS").Parse(); QP problem = QPSParser("HS35.QPS").Parse();
VectorValues actualSolution; VectorValues actual = QPSolver(problem).optimize().first;
boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); double error_actual = problem.cost.error(actual);
double error_actual = problem.cost.error(actualSolution); CHECK(assert_equal(1.11111111e-01, error_actual, 1e-7))
CHECK(assert_equal(1.11111111e-01,error_actual, 1e-7))
} }
TEST(QPSolver, HS35MOD) { TEST(QPSolver, HS35MOD) {
QP problem = QPSParser("HS35MOD.QPS").Parse(); QP problem = QPSParser("HS35MOD.QPS").Parse();
VectorValues actualSolution; VectorValues actual = QPSolver(problem).optimize().first;
boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); double error_actual = problem.cost.error(actual);
double error_actual = problem.cost.error(actualSolution); CHECK(assert_equal(2.50000001e-01, error_actual, 1e-7))
CHECK(assert_equal(2.50000001e-01,error_actual, 1e-7))
} }
TEST(QPSolver, HS51) { TEST(QPSolver, HS51) {
QP problem = QPSParser("HS51.QPS").Parse(); QP problem = QPSParser("HS51.QPS").Parse();
VectorValues actualSolution; VectorValues actual = QPSolver(problem).optimize().first;
boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); double error_actual = problem.cost.error(actual);
double error_actual = problem.cost.error(actualSolution); CHECK(assert_equal(8.88178420e-16, error_actual, 1e-7))
CHECK(assert_equal(8.88178420e-16,error_actual, 1e-7))
} }
TEST(QPSolver, HS52) { TEST(QPSolver, HS52) {
QP problem = QPSParser("HS52.QPS").Parse(); QP problem = QPSParser("HS52.QPS").Parse();
VectorValues actualSolution; VectorValues actual = QPSolver(problem).optimize().first;
boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); double error_actual = problem.cost.error(actual);
double error_actual = problem.cost.error(actualSolution); CHECK(assert_equal(5.32664756, error_actual, 1e-7))
CHECK(assert_equal(5.32664756,error_actual, 1e-7))
} }
TEST(QPSolver, HS268) { // This test needs an extra order of magnitude of tolerance than the rest TEST(QPSolver, HS268) { // This test needs an extra order of magnitude of
// tolerance than the rest
QP problem = QPSParser("HS268.QPS").Parse(); QP problem = QPSParser("HS268.QPS").Parse();
VectorValues actualSolution; VectorValues actual = QPSolver(problem).optimize().first;
boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); double error_actual = problem.cost.error(actual);
double error_actual = problem.cost.error(actualSolution); CHECK(assert_equal(5.73107049e-07, error_actual, 1e-6))
CHECK(assert_equal(5.73107049e-07,error_actual, 1e-6))
} }
TEST(QPSolver, QPTEST) { // REQUIRES Jacobian Fix TEST(QPSolver, QPTEST) { // REQUIRES Jacobian Fix
QP problem = QPSParser("QPTEST.QPS").Parse(); QP problem = QPSParser("QPTEST.QPS").Parse();
VectorValues actualSolution; VectorValues actual = QPSolver(problem).optimize().first;
boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); double error_actual = problem.cost.error(actual);
double error_actual = problem.cost.error(actualSolution); CHECK(assert_equal(0.437187500e01, error_actual, 1e-7))
CHECK(assert_equal(0.437187500e01,error_actual, 1e-7))
} }
/* ************************************************************************* */ /* ************************************************************************* */
// Create Matlab's test graph as in http://www.mathworks.com/help/optim/ug/quadprog.html // Create Matlab's test graph as in
// http://www.mathworks.com/help/optim/ug/quadprog.html
QP createTestMatlabQPEx() { QP createTestMatlabQPEx() {
QP qp; QP qp;
// Objective functions 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 -6*x2 // Objective functions 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 -6*x2
// Note the Hessian encodes: // Note the Hessian encodes:
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 +
// 0.5*f
// Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0 // Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0
qp.cost.push_back( qp.cost.push_back(HessianFactor(X(1), X(2), 1.0 * I_1x1, -I_1x1, 2.0 * I_1x1,
HessianFactor(X(1), X(2), 1.0 * I_1x1, -I_1x1, 2.0 * I_1x1, 2.0 * I_1x1, 2.0 * I_1x1, 6 * I_1x1, 1000.0));
6 * I_1x1, 1000.0));
// Inequality constraints // Inequality constraints
qp.inequalities.push_back(LinearInequality(X(1), I_1x1, X(2), I_1x1, 2, 0)); // x1 + x2 <= 2 qp.inequalities.add(X(1), I_1x1, X(2), I_1x1, 2, 0); // x1 + x2 <= 2
qp.inequalities.push_back( qp.inequalities.add(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 1); //-x1 + 2*x2 <=2
LinearInequality(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 1)); //-x1 + 2*x2 <=2 qp.inequalities.add(X(1), 2 * I_1x1, X(2), I_1x1, 3, 2); // 2*x1 + x2 <=3
qp.inequalities.push_back( qp.inequalities.add(X(1), -I_1x1, 0, 3); // -x1 <= 0
LinearInequality(X(1), 2 * I_1x1, X(2), I_1x1, 3, 2)); // 2*x1 + x2 <=3 qp.inequalities.add(X(2), -I_1x1, 0, 4); // -x2 <= 0
qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0, 3)); // -x1 <= 0
qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0, 4)); // -x2 <= 0
return qp; return qp;
} }
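As a quick check on the Hessian encoding spelled out in the comment above (plain arithmetic under the stated convention, nothing new): with G11 = 1, G12 = -1, G22 = 2, g1 = 2, g2 = 6 the encoded objective expands to

\[ \tfrac{1}{2}G_{11}x_1^2 + G_{12}x_1x_2 + \tfrac{1}{2}G_{22}x_2^2 - g_1x_1 - g_2x_2 = \tfrac{1}{2}x_1^2 - x_1x_2 + x_2^2 - 2x_1 - 6x_2, \]

which matches the quadprog objective quoted in the comment; the constant term 0.5*f only shifts the cost and does not affect the minimizer (2/3, 4/3) checked in the two tests that follow.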
///* ************************************************************************* */ ///* *************************************************************************
///*/
TEST(QPSolver, optimizeMatlabEx) { TEST(QPSolver, optimizeMatlabEx) {
QP qp = createTestMatlabQPEx(); QP qp = createTestMatlabQPEx();
QPSolver solver(qp); QPSolver solver(qp);
VectorValues initialValues; VectorValues initialValues;
initialValues.insert(X(1), Z_1x1); initialValues.insert(X(1), Z_1x1);
initialValues.insert(X(2), Z_1x1); initialValues.insert(X(2), Z_1x1);
VectorValues solution; VectorValues solution = solver.optimize(initialValues).first;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); VectorValues expected;
VectorValues expectedSolution; expected.insert(X(1), (Vector(1) << 2.0 / 3.0).finished());
expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); expected.insert(X(2), (Vector(1) << 4.0 / 3.0).finished());
expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); CHECK(assert_equal(expected, solution, 1e-7));
CHECK(assert_equal(expectedSolution, solution, 1e-7));
} }
///* ************************************************************************* */ ///* *************************************************************************
///*/
TEST(QPSolver, optimizeMatlabExNoinitials) { TEST(QPSolver, optimizeMatlabExNoinitials) {
QP qp = createTestMatlabQPEx(); QP qp = createTestMatlabQPEx();
QPSolver solver(qp); QPSolver solver(qp);
VectorValues solution; VectorValues solution = solver.optimize().first;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(); VectorValues expected;
VectorValues expectedSolution; expected.insert(X(1), (Vector(1) << 2.0 / 3.0).finished());
expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); expected.insert(X(2), (Vector(1) << 4.0 / 3.0).finished());
expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); CHECK(assert_equal(expected, solution, 1e-7));
CHECK(assert_equal(expectedSolution, solution, 1e-7));
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -387,18 +366,15 @@ TEST(QPSolver, optimizeMatlabExNoinitials) {
QP createTestNocedal06bookEx16_4() { QP createTestNocedal06bookEx16_4() {
QP qp; QP qp;
qp.cost.push_back(JacobianFactor(X(1), I_1x1, I_1x1)); qp.cost.add(X(1), I_1x1, I_1x1);
qp.cost.push_back(JacobianFactor(X(2), I_1x1, 2.5 * I_1x1)); qp.cost.add(X(2), I_1x1, 2.5 * I_1x1);
// Inequality constraints // Inequality constraints
qp.inequalities.push_back( qp.inequalities.add(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 0);
LinearInequality(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 0)); qp.inequalities.add(X(1), I_1x1, X(2), 2 * I_1x1, 6, 1);
qp.inequalities.push_back( qp.inequalities.add(X(1), I_1x1, X(2), -2 * I_1x1, 2, 2);
LinearInequality(X(1), I_1x1, X(2), 2 * I_1x1, 6, 1)); qp.inequalities.add(X(1), -I_1x1, 0.0, 3);
qp.inequalities.push_back( qp.inequalities.add(X(2), -I_1x1, 0.0, 4);
LinearInequality(X(1), I_1x1, X(2), -2 * I_1x1, 2, 2));
qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0.0, 3));
qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0.0, 4));
return qp; return qp;
} }
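This fixture encodes Nocedal & Wright's Example 16.4: minimize (x1 - 1)^2 + (x2 - 2.5)^2 over the five half-planes added above, with optimum (1.4, 1.7) as asserted in the test below. A hedged SciPy cross-check (again, SciPy appears here only for illustration):

from scipy.optimize import minimize

objective = lambda x: (x[0] - 1.0) ** 2 + (x[1] - 2.5) ** 2
constraints = [
    {"type": "ineq", "fun": lambda x: x[0] - 2 * x[1] + 2},   # -x1 + 2*x2 <= 2
    {"type": "ineq", "fun": lambda x: -x[0] - 2 * x[1] + 6},  # x1 + 2*x2 <= 6
    {"type": "ineq", "fun": lambda x: -x[0] + 2 * x[1] + 2},  # x1 - 2*x2 <= 2
]
result = minimize(objective, x0=[2.0, 0.0], bounds=[(0, None), (0, None)],
                  constraints=constraints)
print(result.x)  # approximately [1.4, 1.7]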
@ -410,21 +386,19 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4) {
initialValues.insert(X(1), (Vector(1) << 2.0).finished()); initialValues.insert(X(1), (Vector(1) << 2.0).finished());
initialValues.insert(X(2), Z_1x1); initialValues.insert(X(2), Z_1x1);
VectorValues solution; VectorValues solution = solver.optimize(initialValues).first;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); VectorValues expected;
VectorValues expectedSolution; expected.insert(X(1), (Vector(1) << 1.4).finished());
expectedSolution.insert(X(1), (Vector(1) << 1.4).finished()); expected.insert(X(2), (Vector(1) << 1.7).finished());
expectedSolution.insert(X(2), (Vector(1) << 1.7).finished()); CHECK(assert_equal(expected, solution, 1e-7));
CHECK(assert_equal(expectedSolution, solution, 1e-7));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(QPSolver, failedSubproblem) { TEST(QPSolver, failedSubproblem) {
QP qp; QP qp;
qp.cost.push_back(JacobianFactor(X(1), I_2x2, Z_2x1)); qp.cost.add(X(1), I_2x2, Z_2x1);
qp.cost.push_back(HessianFactor(X(1), Z_2x2, Z_2x1, 100.0)); qp.cost.push_back(HessianFactor(X(1), Z_2x2, Z_2x1, 100.0));
qp.inequalities.push_back( qp.inequalities.add(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0);
LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0));
VectorValues expected; VectorValues expected;
expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished()); expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished());
@ -433,8 +407,7 @@ TEST(QPSolver, failedSubproblem) {
initialValues.insert(X(1), (Vector(2) << 10.0, 100.0).finished()); initialValues.insert(X(1), (Vector(2) << 10.0, 100.0).finished());
QPSolver solver(qp); QPSolver solver(qp);
VectorValues solution; VectorValues solution = solver.optimize(initialValues).first;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
CHECK(assert_equal(expected, solution, 1e-7)); CHECK(assert_equal(expected, solution, 1e-7));
} }
@ -442,10 +415,9 @@ TEST(QPSolver, failedSubproblem) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(QPSolver, infeasibleInitial) { TEST(QPSolver, infeasibleInitial) {
QP qp; QP qp;
qp.cost.push_back(JacobianFactor(X(1), I_2x2, Vector::Zero(2))); qp.cost.add(X(1), I_2x2, Vector::Zero(2));
qp.cost.push_back(HessianFactor(X(1), Z_2x2, Vector::Zero(2), 100.0)); qp.cost.push_back(HessianFactor(X(1), Z_2x2, Vector::Zero(2), 100.0));
qp.inequalities.push_back( qp.inequalities.add(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0);
LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0));
VectorValues expected; VectorValues expected;
expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished()); expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished());
@ -464,4 +436,3 @@ int main() {
return TestRegistry::runAllTests(tr); return TestRegistry::runAllTests(tr);
} }
/* ************************************************************************* */ /* ************************************************************************* */

View File

@ -2,6 +2,10 @@
include(GtsamMatlabWrap) include(GtsamMatlabWrap)
# Record the root dir for gtsam - needed during external builds, e.g., ROS
set(GTSAM_SOURCE_ROOT_DIR ${GTSAM_SOURCE_DIR})
message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]")
# Tests # Tests
#message(STATUS "Installing Matlab Toolbox") #message(STATUS "Installing Matlab Toolbox")
install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "*.m;*.fig") install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "*.m;*.fig")
@ -21,7 +25,7 @@ install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "README-gtsam-toolbox.
file(GLOB matlab_examples_data_graph "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.graph") file(GLOB matlab_examples_data_graph "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.graph")
file(GLOB matlab_examples_data_mat "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.mat") file(GLOB matlab_examples_data_mat "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.mat")
file(GLOB matlab_examples_data_txt "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.txt") file(GLOB matlab_examples_data_txt "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.txt")
set(matlab_examples_data ${matlab_examples_data_graph} ${matlab_examples_data_mat} ${matlab_examples_data_txt}) set(matlab_examples_data ${matlab_examples_data_graph} ${matlab_examples_data_mat} ${matlab_examples_data_txt})
if(GTSAM_BUILD_TYPE_POSTFIXES) if(GTSAM_BUILD_TYPE_POSTFIXES)
foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${build_type}" build_type_upper) string(TOUPPER "${build_type}" build_type_upper)
@ -38,4 +42,3 @@ if(GTSAM_BUILD_TYPE_POSTFIXES)
else() else()
install(FILES ${matlab_examples_data} DESTINATION ${GTSAM_TOOLBOX_INSTALL_PATH}/gtsam_examples/Data) install(FILES ${matlab_examples_data} DESTINATION ${GTSAM_TOOLBOX_INSTALL_PATH}/gtsam_examples/Data)
endif() endif()

View File

@ -37,7 +37,8 @@ set(ignore
gtsam::Point2Vector gtsam::Point2Vector
gtsam::Pose3Vector gtsam::Pose3Vector
gtsam::KeyVector gtsam::KeyVector
gtsam::BinaryMeasurementsUnit3) gtsam::BinaryMeasurementsUnit3
gtsam::KeyPairDoubleMap)
pybind_wrap(gtsam_py # target pybind_wrap(gtsam_py # target
${PROJECT_SOURCE_DIR}/gtsam/gtsam.i # interface_header ${PROJECT_SOURCE_DIR}/gtsam/gtsam.i # interface_header
@ -80,7 +81,9 @@ set(ignore
gtsam::Pose3Vector gtsam::Pose3Vector
gtsam::KeyVector gtsam::KeyVector
gtsam::FixedLagSmootherKeyTimestampMapValue gtsam::FixedLagSmootherKeyTimestampMapValue
gtsam::BinaryMeasurementsUnit3) gtsam::BinaryMeasurementsUnit3
gtsam::KeyPairDoubleMap)
pybind_wrap(gtsam_unstable_py # target pybind_wrap(gtsam_unstable_py # target
${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header
"gtsam_unstable.cpp" # generated_cpp "gtsam_unstable.cpp" # generated_cpp

View File

@ -39,7 +39,7 @@
| SelfCalibrationExample | | | SelfCalibrationExample | |
| SFMdata | | | SFMdata | |
| SFMExample_bal_COLAMD_METIS | | | SFMExample_bal_COLAMD_METIS | |
| SFMExample_bal | | | SFMExample_bal | :heavy_check_mark: |
| SFMExample | :heavy_check_mark: | | SFMExample | :heavy_check_mark: |
| SFMExampleExpressions_bal | | | SFMExampleExpressions_bal | |
| SFMExampleExpressions | | | SFMExampleExpressions | |

View File

@ -0,0 +1,118 @@
"""
GTSAM Copyright 2010, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file
Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert)
"""
import argparse
import logging
import sys
import matplotlib.pyplot as plt
import numpy as np
import gtsam
from gtsam import (
GeneralSFMFactorCal3Bundler,
PinholeCameraCal3Bundler,
PriorFactorPinholeCameraCal3Bundler,
readBal,
symbol_shorthand
)
C = symbol_shorthand.C
P = symbol_shorthand.P
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def run(args):
""" Run LM optimization with BAL input data and report resulting error """
input_file = gtsam.findExampleDataFile(args.input_file)
# Load the SfM data from file
scene_data = readBal(input_file)
logging.info(f"read {scene_data.number_tracks()} tracks on {scene_data.number_cameras()} cameras\n")
# Create a factor graph
graph = gtsam.NonlinearFactorGraph()
# We share *one* noiseModel between all projection factors
noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v
# Add measurements to the factor graph
j = 0
for t_idx in range(scene_data.number_tracks()):
track = scene_data.track(t_idx) # SfmTrack
# retrieve the SfmMeasurement objects
for m_idx in range(track.number_measurements()):
# i represents the camera index, and uv is the 2d measurement
i, uv = track.measurement(m_idx)
# note use of shorthand symbols C and P
graph.add(GeneralSFMFactorCal3Bundler(uv, noise, C(i), P(j)))
j += 1
# Add a prior on pose x1. This indirectly specifies where the origin is.
graph.push_back(
gtsam.PriorFactorPinholeCameraCal3Bundler(
C(0), scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1)
)
)
# Also add a prior on the position of the first landmark to fix the scale
graph.push_back(
gtsam.PriorFactorPoint3(
P(0), scene_data.track(0).point3(), gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
)
)
# Create initial estimate
initial = gtsam.Values()
i = 0
# add each PinholeCameraCal3Bundler
for cam_idx in range(scene_data.number_cameras()):
camera = scene_data.camera(cam_idx)
initial.insert(C(i), camera)
i += 1
j = 0
# add each SfmTrack
for t_idx in range(scene_data.number_tracks()):
track = scene_data.track(t_idx)
initial.insert(P(j), track.point3())
j += 1
# Optimize the graph and print results
try:
params = gtsam.LevenbergMarquardtParams()
params.setVerbosityLM("ERROR")
lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = lm.optimize()
except Exception as e:
logging.exception("LM Optimization failed")
return
# Error drops from ~2764.22 to ~0.046
logging.info(f"final error: {graph.error(result)}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--input_file',
type=str,
default="dubrovnik-3-7-pre",
help='Read SFM data from the specified BAL file. '
'The data format is described here: https://grail.cs.washington.edu/projects/bal/. '
'BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples, '
'then (wx,wy,wz,tx,ty,tz,f,k1,k2) as Bundler camera calibrations w/ Rodrigues vector '
'and (x,y,z) 3d point initializations.'
)
run(parser.parse_args())
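As a quick usage sketch, the example can also be driven programmatically instead of via the command line; the import path below is hypothetical and should be adjusted to wherever the script is installed:

import argparse
from gtsam.examples import SFMExample_bal  # hypothetical location of the script above

# Mirrors `python SFMExample_bal.py -i dubrovnik-3-7-pre`
args = argparse.Namespace(input_file="dubrovnik-3-7-pre")
SFMExample_bal.run(args)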

View File

@ -0,0 +1,144 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
This example shows how 1dsfm uses outlier rejection (MFAS) and optimization (translation recovery)
together for estimating global translations from relative translation directions and global rotations.
The purpose of this example is to illustrate the connection between these two classes using a small SfM dataset.
Author: Akshay Krishnan
Date: September 2020
"""
from collections import defaultdict
from typing import List, Tuple
import numpy as np
import gtsam
from gtsam.examples import SFMdata
# Hyperparameters for 1dsfm, values used from Kyle Wilson's code.
MAX_1DSFM_PROJECTION_DIRECTIONS = 48
OUTLIER_WEIGHT_THRESHOLD = 0.1
def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]:
""""Returns global rotations and unit translation directions between 8 cameras
that lie on a circle and face the center. The poses of 8 cameras are obtained from SFMdata
and the unit translations directions between some camera pairs are computed from their
global translations. """
fx, fy, s, u0, v0 = 50.0, 50.0, 0.0, 50.0, 50.0
wTc_list = SFMdata.createPoses(gtsam.Cal3_S2(fx, fy, s, u0, v0))
# Rotations of the cameras in the world frame.
wRc_values = gtsam.Values()
# Normalized translation directions from camera i to camera j
# in the coordinate frame of camera i.
i_iZj_list = []
for i in range(0, len(wTc_list) - 2):
# Add the rotation.
wRi = wTc_list[i].rotation()
wRc_values.insert(i, wRi)
# Create unit translation measurements with next two poses.
for j in range(i + 1, i + 3):
# Compute the translation from pose i to pose j, in the world reference frame.
w_itj = wTc_list[j].translation() - wTc_list[i].translation()
# Obtain the translation in the camera i's reference frame.
i_itj = wRi.unrotate(w_itj)
# Compute the normalized unit translation direction.
i_iZj = gtsam.Unit3(i_itj)
i_iZj_list.append(gtsam.BinaryMeasurementUnit3(
i, j, i_iZj, gtsam.noiseModel.Isotropic.Sigma(3, 0.01)))
# Add the last two rotations.
wRc_values.insert(len(wTc_list) - 1, wTc_list[-1].rotation())
wRc_values.insert(len(wTc_list) - 2, wTc_list[-2].rotation())
return wRc_values, i_iZj_list
def filter_outliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMeasurementsUnit3:
"""Removes outliers from a list of Unit3 measurements that are the
translation directions from camera i to camera j in the world frame."""
# Indices of measurements that are to be used as projection directions.
# These are randomly chosen. All sampled directions must be unique.
num_directions_to_sample = min(
MAX_1DSFM_PROJECTION_DIRECTIONS, len(w_iZj_list))
sampled_indices = np.random.choice(
len(w_iZj_list), num_directions_to_sample, replace=False)
# Sample projection directions from the measurements.
projection_directions = [w_iZj_list[idx].measured()
for idx in sampled_indices]
outlier_weights = []
# Find the outlier weights for each direction using MFAS.
for direction in projection_directions:
algorithm = gtsam.MFAS(w_iZj_list, direction)
outlier_weights.append(algorithm.computeOutlierWeights())
# Compute average of outlier weights. Each outlier weight is a map from a pair of Keys
# (camera IDs) to a weight, where weights are proportional to the probability of the edge
# being an outlier.
avg_outlier_weights = defaultdict(float)
for outlier_weight_dict in outlier_weights:
for keypair, weight in outlier_weight_dict.items():
avg_outlier_weights[keypair] += weight / len(outlier_weights)
# Remove w_iZj measurements whose average weight exceeds the threshold; these are outliers.
w_iZj_inliers = gtsam.BinaryMeasurementsUnit3()
[w_iZj_inliers.append(w_iZj) for w_iZj in w_iZj_list if avg_outlier_weights[(
w_iZj.key1(), w_iZj.key2())] < OUTLIER_WEIGHT_THRESHOLD]
return w_iZj_inliers
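The averaging in filter_outliers is just a per-edge mean of the weight maps returned by MFAS for each sampled direction; a toy illustration with plain dicts and made-up weights (same 0.1 threshold as above):

from collections import defaultdict

# hypothetical per-direction outlier weights for two edges, from two projection directions
per_direction = [{(0, 1): 0.00, (1, 2): 0.40},
                 {(0, 1): 0.04, (1, 2): 0.60}]
avg = defaultdict(float)
for weights in per_direction:
    for edge, w in weights.items():
        avg[edge] += w / len(per_direction)
print(dict(avg))  # {(0, 1): 0.02, (1, 2): 0.5}; edge (1, 2) exceeds 0.1 and is dropped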
def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
wRc_values: gtsam.Values) -> gtsam.Values:
"""Estimate poses given rotations and normalized translation directions between cameras.
Args:
i_iZj_list: List of normalized translation direction measurements between camera pairs,
Z here refers to measurements. The measurements are of camera j with reference
to camera i (iZj), in camera i's coordinate frame (i_). iZj represents a unit
vector to j in i's frame and is not a transformation.
wRc_values: Rotations of the cameras in the world frame.
Returns:
gtsam.Values: Estimated poses.
"""
# Convert the translation direction measurements to world frame using the rotations.
w_iZj_list = gtsam.BinaryMeasurementsUnit3()
for i_iZj in i_iZj_list:
w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1())
.rotate(i_iZj.measured().point3()))
w_iZj_list.append(gtsam.BinaryMeasurementUnit3(
i_iZj.key1(), i_iZj.key2(), w_iZj, i_iZj.noiseModel()))
# Remove the outliers in the unit translation directions.
w_iZj_inliers = filter_outliers(w_iZj_list)
# Run the optimizer to obtain translations for normalized directions.
wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run()
wTc_values = gtsam.Values()
for key in wRc_values.keys():
wTc_values.insert(key, gtsam.Pose3(
wRc_values.atRot3(key), wtc_values.atPoint3(key)))
return wTc_values
def main():
wRc_values, i_iZj_list = get_data()
wTc_values = estimate_poses(i_iZj_list, wRc_values)
print("**** Translation averaging output ****")
print(wTc_values)
print("**************************************")
if __name__ == '__main__':
main()
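Because this commit also binds gtsam::KeyPairDoubleMap (see the preamble change further down), the weights computed by MFAS can be inspected directly from Python. A minimal sketch reusing the helpers above; treating the map keys as (i, j) tuples is an assumption about the binding:

import gtsam

wRc_values, i_iZj_list = get_data()
measurements = gtsam.BinaryMeasurementsUnit3()
for measurement in i_iZj_list:
    measurements.append(measurement)

# Project onto the first measured direction and look at the per-edge outlier weights
direction = i_iZj_list[0].measured()
weights = gtsam.MFAS(measurements, direction).computeOutlierWeights()
for (i, j), weight in weights.items():
    print(f"edge ({i}, {j}): weight {weight:.3f}")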

View File

@ -12,3 +12,4 @@ py::bind_vector<std::vector<boost::shared_ptr<gtsam::BetweenFactor<gtsam::Pose2>
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Unit3> > >(m_, "BinaryMeasurementsUnit3"); py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Unit3> > >(m_, "BinaryMeasurementsUnit3");
py::bind_map<gtsam::IndexPairSetMap>(m_, "IndexPairSetMap"); py::bind_map<gtsam::IndexPairSetMap>(m_, "IndexPairSetMap");
py::bind_vector<gtsam::IndexPairVector>(m_, "IndexPairVector"); py::bind_vector<gtsam::IndexPairVector>(m_, "IndexPairVector");
py::bind_map<gtsam::KeyPairDoubleMap>(m_, "KeyPairDoubleMap");

View File

@ -19,7 +19,7 @@ from gtsam.utils.test_case import GtsamTestCase
class TestJacobianFactor(GtsamTestCase): class TestJacobianFactor(GtsamTestCase):
def test_eliminate(self): def test_eliminate(self):
# Recommended way to specify a matrix (see cython/README) # Recommended way to specify a matrix (see python/README)
Ax2 = np.array( Ax2 = np.array(
[[-5., 0.], [[-5., 0.],
[+0., -5.], [+0., -5.],

View File

@ -65,6 +65,14 @@ class TestPose3(GtsamTestCase):
actual = Pose3.adjoint_(xi, xi) actual = Pose3.adjoint_(xi, xi)
np.testing.assert_array_equal(actual, expected) np.testing.assert_array_equal(actual, expected)
def test_serialization(self):
"""Test if serialization is working normally"""
expected = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0))
actual = Pose3()
serialized = expected.serialize()
actual.deserialize(serialized)
self.gtsamAssertEquals(expected, actual, 1e-10)
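Beyond the round-trip check above, the string-based serialize()/deserialize() pair is also a convenient way to deep-copy a wrapped object from Python; a small sketch under the same API assumptions as the test:

from gtsam import Point3, Pose3, Rot3

original = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0))
copy = Pose3()
copy.deserialize(original.serialize())  # independent object with identical state
assert copy.equals(original, 1e-10)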
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()

View File

@ -0,0 +1,79 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Unit tests for testing dataset access.
Author: Frank Dellaert (Python: Sushmita Warrier)
"""
# pylint: disable=invalid-name, no-name-in-module, no-member
from __future__ import print_function
import unittest
import numpy as np
import gtsam
from gtsam.utils.test_case import GtsamTestCase
class TestSfmData(GtsamTestCase):
"""Tests for SfmData and SfmTrack modules."""
def setUp(self):
"""Initialize SfmData and SfmTrack"""
self.data = gtsam.SfmData()
# initialize SfmTrack with 3D point
self.tracks = gtsam.SfmTrack()
def test_tracks(self):
"""Test functions in SfmTrack"""
# measurement is of format (camera_idx, imgPoint)
# create arbitrary camera indices for two cameras
i1, i2 = 4,5
# create arbitrary image measurements for cameras i1 and i2
uv_i1 = gtsam.Point2(12.6, 82)
# translating point uv_i1 along X-axis
uv_i2 = gtsam.Point2(24.88, 82)
# add measurements to the track
self.tracks.add_measurement(i1, uv_i1)
self.tracks.add_measurement(i2, uv_i2)
# Number of measurements in the track is 2
self.assertEqual(self.tracks.number_measurements(), 2)
# camera_idx in the first measurement of the track corresponds to i1
cam_idx, img_measurement = self.tracks.measurement(0)
self.assertEqual(cam_idx, i1)
np.testing.assert_array_almost_equal(
gtsam.Point3(0.,0.,0.),
self.tracks.point3()
)
def test_data(self):
"""Test functions in SfmData"""
# Create new track with 3 measurements
i1, i2, i3 = 3,5,6
uv_i1 = gtsam.Point2(21.23, 45.64)
# translating along X-axis
uv_i2 = gtsam.Point2(45.7, 45.64)
uv_i3 = gtsam.Point2(68.35, 45.64)
# add measurements and arbitrary point to the track
measurements = [(i1, uv_i1), (i2, uv_i2), (i3, uv_i3)]
pt = gtsam.Point3(1.0, 6.0, 2.0)
track2 = gtsam.SfmTrack(pt)
track2.add_measurement(i1, uv_i1)
track2.add_measurement(i2, uv_i2)
track2.add_measurement(i3, uv_i3)
self.data.add_track(self.tracks)
self.data.add_track(track2)
# Number of tracks in SfmData is 2
self.assertEqual(self.data.number_tracks(), 2)
# camera idx of first measurement of second track corresponds to i1
cam_idx, img_measurement = self.data.track(1).measurement(0)
self.assertEqual(cam_idx, i1)
if __name__ == '__main__':
unittest.main()

View File

@ -566,6 +566,58 @@ TEST( NonlinearOptimizer, logfile )
// EXPECT(actual.str()==expected.str()); // EXPECT(actual.str()==expected.str());
} }
/* ************************************************************************* */
TEST( NonlinearOptimizer, iterationHook_LM )
{
NonlinearFactorGraph fg(example::createReallyNonlinearFactorGraph());
Point2 x0(3,3);
Values c0;
c0.insert(X(1), x0);
// Levenberg-Marquardt
LevenbergMarquardtParams lmParams;
size_t lastIterCalled = 0;
lmParams.iterationHook = [&](size_t iteration, double oldError, double newError)
{
// Tests:
lastIterCalled = iteration;
EXPECT(newError<oldError);
// Example of evolution printout:
//std::cout << "iter: " << iteration << " error: " << oldError << " => " << newError <<"\n";
};
LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize();
EXPECT(lastIterCalled>5);
}
/* ************************************************************************* */
TEST( NonlinearOptimizer, iterationHook_CG )
{
NonlinearFactorGraph fg(example::createReallyNonlinearFactorGraph());
Point2 x0(3,3);
Values c0;
c0.insert(X(1), x0);
// Levenberg-Marquardt
NonlinearConjugateGradientOptimizer::Parameters cgParams;
size_t lastIterCalled = 0;
cgParams.iterationHook = [&](size_t iteration, double oldError, double newError)
{
// Tests:
lastIterCalled = iteration;
EXPECT(newError<oldError);
// Example of evolution printout:
//std::cout << "iter: " << iteration << " error: " << oldError << " => " << newError <<"\n";
};
NonlinearConjugateGradientOptimizer(fg, c0, cgParams).optimize();
EXPECT(lastIterCalled>5);
}
/* ************************************************************************* */ /* ************************************************************************* */
//// Minimal traits example //// Minimal traits example
struct MyType : public Vector3 { struct MyType : public Vector3 {

View File

@ -146,7 +146,7 @@ function(install_python_scripts
else() else()
set(build_type_tag "") set(build_type_tag "")
endif() endif()
# Split up filename to strip trailing '/' in WRAP_CYTHON_INSTALL_PATH if # Split up filename to strip trailing '/' in GTSAM_PY_INSTALL_PATH if
# there is one # there is one
get_filename_component(location "${dest_directory}" PATH) get_filename_component(location "${dest_directory}" PATH)
get_filename_component(name "${dest_directory}" NAME) get_filename_component(name "${dest_directory}" NAME)

View File

@ -69,13 +69,13 @@ class PybindWrapper(object):
return textwrap.dedent(''' return textwrap.dedent('''
.def("serialize", .def("serialize",
[]({class_inst} self){{ []({class_inst} self){{
return gtsam::serialize(self); return gtsam::serialize(*self);
}} }}
) )
.def("deserialize", .def("deserialize",
[]({class_inst} self, string serialized){{ []({class_inst} self, string serialized){{
return gtsam::deserialize(serialized, self); gtsam::deserialize(serialized, *self);
}}) }}, py::arg("serialized"))
'''.format(class_inst=cpp_class + '*')) '''.format(class_inst=cpp_class + '*'))
is_method = isinstance(method, instantiator.InstantiatedMethod) is_method = isinstance(method, instantiator.InstantiatedMethod)
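With the dereference fix (serialize/deserialize now operate on *self) and the added py::arg, the generated deserialize binding accepts the payload by keyword as well. A sketch of the resulting Python-side call, assuming a wrapped type such as gtsam.Pose3:

import gtsam

blob = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1, 2, 3)).serialize()
pose = gtsam.Pose3()
pose.deserialize(serialized=blob)  # keyword form enabled by py::arg("serialized")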

View File

@ -40,13 +40,13 @@ PYBIND11_MODULE(geometry_py, m_) {
.def("vectorConfusion",[](gtsam::Point2* self){return self->vectorConfusion();}) .def("vectorConfusion",[](gtsam::Point2* self){return self->vectorConfusion();})
.def("serialize", .def("serialize",
[](gtsam::Point2* self){ [](gtsam::Point2* self){
return gtsam::serialize(self); return gtsam::serialize(*self);
} }
) )
.def("deserialize", .def("deserialize",
[](gtsam::Point2* self, string serialized){ [](gtsam::Point2* self, string serialized){
return gtsam::deserialize(serialized, self); gtsam::deserialize(serialized, *self);
}) }, py::arg("serialized"))
; ;
py::class_<gtsam::Point3, std::shared_ptr<gtsam::Point3>>(m_gtsam, "Point3") py::class_<gtsam::Point3, std::shared_ptr<gtsam::Point3>>(m_gtsam, "Point3")
@ -54,13 +54,13 @@ PYBIND11_MODULE(geometry_py, m_) {
.def("norm",[](gtsam::Point3* self){return self->norm();}) .def("norm",[](gtsam::Point3* self){return self->norm();})
.def("serialize", .def("serialize",
[](gtsam::Point3* self){ [](gtsam::Point3* self){
return gtsam::serialize(self); return gtsam::serialize(*self);
} }
) )
.def("deserialize", .def("deserialize",
[](gtsam::Point3* self, string serialized){ [](gtsam::Point3* self, string serialized){
return gtsam::deserialize(serialized, self); gtsam::deserialize(serialized, *self);
}) }, py::arg("serialized"))
.def_static("staticFunction",[](){return gtsam::Point3::staticFunction();}) .def_static("staticFunction",[](){return gtsam::Point3::staticFunction();})
.def_static("StaticFunctionRet",[]( double z){return gtsam::Point3::StaticFunctionRet(z);}, py::arg("z")); .def_static("StaticFunctionRet",[]( double z){return gtsam::Point3::StaticFunctionRet(z);}, py::arg("z"));