Merge branch 'develop' into fix/iSAM2
commit
4d68f0e55a
@ -0,0 +1,8 @@
BasedOnStyle: Google

BinPackArguments: false
BinPackParameters: false
ColumnLimit: 100
DerivePointerAlignment: false
IncludeBlocks: Preserve
PointerAlignment: Left

@ -1,18 +0,0 @@
### Script to install Boost
BOOST_FOLDER=boost_${BOOST_VERSION//./_}

# Download Boost
wget https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/${BOOST_FOLDER}.tar.gz

# Unzip
tar -zxf ${BOOST_FOLDER}.tar.gz

# Bootstrap
cd ${BOOST_FOLDER}/
./bootstrap.sh --with-libraries=serialization,filesystem,thread,system,atomic,date_time,timer,chrono,program_options,regex

# Build and install
sudo ./b2 -j$(nproc) install

# Rebuild ld cache
sudo ldconfig

@ -9,33 +9,14 @@ set -x -e
|
|||
# install TBB with _debug.so files
|
||||
function install_tbb()
|
||||
{
|
||||
TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
|
||||
TBB_VERSION=4.4.5
|
||||
TBB_DIR=tbb44_20160526oss
|
||||
TBB_SAVEPATH="/tmp/tbb.tgz"
|
||||
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
OS_SHORT="lin"
|
||||
TBB_LIB_DIR="intel64/gcc4.4"
|
||||
SUDO="sudo"
|
||||
sudo apt-get -y install libtbb-dev
|
||||
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
OS_SHORT="osx"
|
||||
TBB_LIB_DIR=""
|
||||
SUDO=""
|
||||
brew install tbb
|
||||
|
||||
fi
|
||||
|
||||
wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH
|
||||
tar -C /tmp -xf $TBB_SAVEPATH
|
||||
|
||||
TBBROOT=/tmp/$TBB_DIR
|
||||
# Copy the needed files to the correct places.
|
||||
# This works correctly for CI builds, instead of setting path variables.
|
||||
# This is what Homebrew does to install TBB on Macs
|
||||
$SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
|
||||
$SUDO cp -R $TBBROOT/include/ /usr/local/include/
|
||||
|
||||
}
|
||||
|
||||
if [ -z ${PYTHON_VERSION+x} ]; then
|
||||
|
@ -43,46 +24,68 @@ if [ -z ${PYTHON_VERSION+x} ]; then
|
|||
exit 127
|
||||
fi
|
||||
|
||||
PYTHON="python${PYTHON_VERSION}"
|
||||
export PYTHON="python${PYTHON_VERSION}"
|
||||
|
||||
if [[ $(uname) == "Darwin" ]]; then
|
||||
function install_dependencies()
|
||||
{
|
||||
if [[ $(uname) == "Darwin" ]]; then
|
||||
brew install wget
|
||||
else
|
||||
else
|
||||
# Install a system package required by our library
|
||||
sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
|
||||
fi
|
||||
fi
|
||||
|
||||
PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
|
||||
export PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
|
||||
|
||||
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
|
||||
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
|
||||
|
||||
$PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
|
||||
}
|
||||
|
||||
function build()
|
||||
{
|
||||
mkdir $GITHUB_WORKSPACE/build
|
||||
cd $GITHUB_WORKSPACE/build
|
||||
|
||||
BUILD_PYBIND="ON"
|
||||
|
||||
cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
-DGTSAM_BUILD_TESTS=OFF \
|
||||
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_USE_QUATERNIONS=OFF \
|
||||
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
|
||||
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
|
||||
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
|
||||
-DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
|
||||
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
|
||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V43=OFF \
|
||||
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
|
||||
|
||||
|
||||
BUILD_PYBIND="ON"
|
||||
# Set to 2 cores so that Actions does not error out during resource provisioning.
|
||||
make -j2 install
|
||||
|
||||
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
|
||||
cd $GITHUB_WORKSPACE/build/python
|
||||
$PYTHON -m pip install --user .
|
||||
}
|
||||
|
||||
mkdir $GITHUB_WORKSPACE/build
|
||||
cd $GITHUB_WORKSPACE/build
|
||||
function test()
|
||||
{
|
||||
cd $GITHUB_WORKSPACE/python/gtsam/tests
|
||||
$PYTHON -m unittest discover -v
|
||||
}
|
||||
|
||||
cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
-DGTSAM_BUILD_TESTS=OFF \
|
||||
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_USE_QUATERNIONS=OFF \
|
||||
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
|
||||
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
|
||||
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
|
||||
-DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
|
||||
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
|
||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=OFF \
|
||||
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
|
||||
|
||||
|
||||
# Set to 2 cores so that Actions does not error out during resource provisioning.
|
||||
make -j2 install
|
||||
|
||||
cd $GITHUB_WORKSPACE/build/python
|
||||
$PYTHON -m pip install --user .
|
||||
cd $GITHUB_WORKSPACE/python/gtsam/tests
|
||||
$PYTHON -m unittest discover -v
|
||||
# select between build or test
|
||||
case $1 in
|
||||
-d)
|
||||
install_dependencies
|
||||
;;
|
||||
-b)
|
||||
build
|
||||
;;
|
||||
-t)
|
||||
test
|
||||
;;
|
||||
esac
|
||||
|
|
|
@ -8,33 +8,14 @@
|
|||
# install TBB with _debug.so files
|
||||
function install_tbb()
|
||||
{
|
||||
TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
|
||||
TBB_VERSION=4.4.5
|
||||
TBB_DIR=tbb44_20160526oss
|
||||
TBB_SAVEPATH="/tmp/tbb.tgz"
|
||||
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
OS_SHORT="lin"
|
||||
TBB_LIB_DIR="intel64/gcc4.4"
|
||||
SUDO="sudo"
|
||||
sudo apt-get -y install libtbb-dev
|
||||
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
OS_SHORT="osx"
|
||||
TBB_LIB_DIR=""
|
||||
SUDO=""
|
||||
brew install tbb
|
||||
|
||||
fi
|
||||
|
||||
wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH
|
||||
tar -C /tmp -xf $TBB_SAVEPATH
|
||||
|
||||
TBBROOT=/tmp/$TBB_DIR
|
||||
# Copy the needed files to the correct places.
|
||||
# This works correctly for CI builds, instead of setting path variables.
|
||||
# This is what Homebrew does to install TBB on Macs
|
||||
$SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
|
||||
$SUDO cp -R $TBBROOT/include/ /usr/local/include/
|
||||
|
||||
}
|
||||
|
||||
# common tasks before either build or test
|
||||
|
@ -58,22 +39,22 @@ function configure()
|
|||
fi
|
||||
|
||||
# GTSAM_BUILD_WITH_MARCH_NATIVE=OFF: to avoid crashes in builder VMs
|
||||
# CMAKE_CXX_FLAGS="-w": Suppress warnings to avoid IO latency in CI logs
|
||||
cmake $SOURCE_DIR \
|
||||
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Debug} \
|
||||
-DCMAKE_CXX_FLAGS="-w" \
|
||||
-DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
|
||||
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
|
||||
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
|
||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=${GTSAM_ALLOW_DEPRECATED_SINCE_V42:-OFF} \
|
||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V43=${GTSAM_ALLOW_DEPRECATED_SINCE_V43:-OFF} \
|
||||
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
|
||||
-DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \
|
||||
-DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \
|
||||
-DGTSAM_USE_SYSTEM_EIGEN=${GTSAM_USE_SYSTEM_EIGEN:-OFF} \
|
||||
-DGTSAM_USE_SYSTEM_METIS=${GTSAM_USE_SYSTEM_METIS:-OFF} \
|
||||
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
|
||||
-DBOOST_ROOT=$BOOST_ROOT \
|
||||
-DBoost_NO_SYSTEM_PATHS=ON \
|
||||
-DBoost_ARCHITECTURE=-x64
|
||||
-DGTSAM_SINGLE_TEST_EXE=OFF
|
||||
}
|
||||
|
||||
|
||||
|
@ -95,7 +76,11 @@ function build ()
|
|||
configure
|
||||
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
make -j$(nproc)
|
||||
if (($(nproc) > 2)); then
|
||||
make -j4
|
||||
else
|
||||
make -j2
|
||||
fi
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
make -j$(sysctl -n hw.physicalcpu)
|
||||
fi
|
||||
|
@ -113,7 +98,11 @@ function test ()
|
|||
|
||||
# Actual testing
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
make -j$(nproc) check
|
||||
if (($(nproc) > 2)); then
|
||||
make -j$(nproc) check
|
||||
else
|
||||
make -j2 check
|
||||
fi
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
make -j$(sysctl -n hw.physicalcpu) check
|
||||
fi
|
||||
|
|
|
@ -1,6 +1,12 @@
|
|||
name: Linux CI
|
||||
|
||||
on: [push, pull_request]
|
||||
on: [pull_request]
|
||||
|
||||
# Every push to a PR immediately cancels the previous checks and starts a new one,
# so runners become available to other PRs more quickly.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
@ -12,7 +18,6 @@ jobs:
|
|||
CTEST_PARALLEL_LEVEL: 2
|
||||
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
|
||||
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
|
||||
BOOST_VERSION: 1.67.0
|
||||
|
||||
strategy:
|
||||
fail-fast: true
|
||||
|
@ -20,37 +25,43 @@ jobs:
|
|||
# Github Actions requires a single row to be added to the build matrix.
|
||||
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
|
||||
name: [
|
||||
ubuntu-18.04-gcc-5,
|
||||
ubuntu-18.04-gcc-9,
|
||||
ubuntu-18.04-clang-9,
|
||||
ubuntu-20.04-gcc-9,
|
||||
ubuntu-20.04-clang-9,
|
||||
ubuntu-22.04-gcc-11,
|
||||
ubuntu-22.04-clang-14,
|
||||
]
|
||||
|
||||
build_type: [Debug, Release]
|
||||
build_type: [Release]
|
||||
build_unstable: [ON]
|
||||
include:
|
||||
- name: ubuntu-18.04-gcc-5
|
||||
os: ubuntu-18.04
|
||||
compiler: gcc
|
||||
version: "5"
|
||||
|
||||
- name: ubuntu-18.04-gcc-9
|
||||
os: ubuntu-18.04
|
||||
- name: ubuntu-20.04-gcc-9
|
||||
os: ubuntu-20.04
|
||||
compiler: gcc
|
||||
version: "9"
|
||||
|
||||
- name: ubuntu-18.04-clang-9
|
||||
os: ubuntu-18.04
|
||||
- name: ubuntu-20.04-clang-9
|
||||
os: ubuntu-20.04
|
||||
compiler: clang
|
||||
version: "9"
|
||||
|
||||
- name: ubuntu-22.04-gcc-11
|
||||
os: ubuntu-22.04
|
||||
compiler: gcc
|
||||
version: "11"
|
||||
|
||||
- name: ubuntu-22.04-clang-14
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
# LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository.
|
||||
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
|
||||
# LLVM (clang) 9/14 is not in Bionic's repositories so we add the official LLVM repository.
|
||||
if [ "${{ matrix.compiler }}" = "clang" ]; then
|
||||
# (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool
|
||||
# ipv4 avoids potential timeouts because of crappy IPv6 infrastructure
|
||||
# 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
|
||||
|
@ -60,9 +71,9 @@ jobs:
|
|||
gpg -a --export $LLVM_KEY | sudo apt-key add -
|
||||
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
|
||||
fi
|
||||
sudo apt-get -y update
|
||||
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
|
||||
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
|
||||
|
@ -76,7 +87,7 @@ jobs:
|
|||
|
||||
- name: Install Boost
|
||||
run: |
|
||||
bash .github/scripts/boost.sh
|
||||
sudo apt-get -y install libboost-all-dev
|
||||
|
||||
- name: Build and Test
|
||||
run: bash .github/scripts/unix.sh -t
|
||||
|
|
|
@ -2,6 +2,12 @@ name: macOS CI
|
|||
|
||||
on: [pull_request]
|
||||
|
||||
# Every push to a PR immediately cancels the previous checks and starts a new one,
# so runners become available to other PRs more quickly.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: ${{ matrix.name }} ${{ matrix.build_type }}
|
||||
|
@ -14,38 +20,32 @@ jobs:
|
|||
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
fail-fast: true
|
||||
matrix:
|
||||
# Github Actions requires a single row to be added to the build matrix.
|
||||
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
|
||||
name: [
|
||||
macOS-10.15-xcode-11.3.1,
|
||||
macos-11-xcode-13.4.1,
|
||||
]
|
||||
|
||||
build_type: [Debug, Release]
|
||||
build_unstable: [ON]
|
||||
include:
|
||||
- name: macOS-10.15-xcode-11.3.1
|
||||
os: macOS-10.15
|
||||
- name: macos-11-xcode-13.4.1
|
||||
os: macos-11
|
||||
compiler: xcode
|
||||
version: "11.3.1"
|
||||
version: "13.4.1"
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
brew install cmake ninja
|
||||
brew install boost
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
brew install gcc@${{ matrix.version }}
|
||||
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
else
|
||||
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
fi
|
||||
sudo xcode-select -switch /Applications/Xcode.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
- name: Build and Test
|
||||
run: bash .github/scripts/unix.sh -t
|
||||
|
|
|
@ -2,6 +2,12 @@ name: Python CI
|
|||
|
||||
on: [pull_request]
|
||||
|
||||
# Every push to a PR immediately cancels the previous checks and starts a new one,
# so runners become available to other PRs more quickly.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }}
|
||||
|
@ -14,58 +20,46 @@ jobs:
|
|||
PYTHON_VERSION: ${{ matrix.python_version }}
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
fail-fast: true
|
||||
matrix:
|
||||
# Github Actions requires a single row to be added to the build matrix.
|
||||
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
|
||||
name: [
|
||||
ubuntu-18.04-gcc-5,
|
||||
ubuntu-18.04-gcc-9,
|
||||
ubuntu-18.04-clang-9,
|
||||
macOS-10.15-xcode-11.3.1,
|
||||
ubuntu-18.04-gcc-5-tbb,
|
||||
]
|
||||
name:
|
||||
[
|
||||
ubuntu-20.04-gcc-9,
|
||||
ubuntu-20.04-gcc-9-tbb,
|
||||
ubuntu-20.04-clang-9,
|
||||
macOS-11-xcode-13.4.1,
|
||||
]
|
||||
|
||||
build_type: [Debug, Release]
|
||||
build_type: [Release]
|
||||
python_version: [3]
|
||||
include:
|
||||
- name: ubuntu-18.04-gcc-5
|
||||
os: ubuntu-18.04
|
||||
compiler: gcc
|
||||
version: "5"
|
||||
|
||||
- name: ubuntu-18.04-gcc-9
|
||||
os: ubuntu-18.04
|
||||
- name: ubuntu-20.04-gcc-9
|
||||
os: ubuntu-20.04
|
||||
compiler: gcc
|
||||
version: "9"
|
||||
|
||||
- name: ubuntu-18.04-clang-9
|
||||
os: ubuntu-18.04
|
||||
compiler: clang
|
||||
version: "9"
|
||||
|
||||
# NOTE temporarily added this as it is a required check.
|
||||
- name: ubuntu-18.04-clang-9
|
||||
os: ubuntu-18.04
|
||||
compiler: clang
|
||||
version: "9"
|
||||
build_type: Debug
|
||||
python_version: "3"
|
||||
|
||||
- name: macOS-10.15-xcode-11.3.1
|
||||
os: macOS-10.15
|
||||
compiler: xcode
|
||||
version: "11.3.1"
|
||||
|
||||
- name: ubuntu-18.04-gcc-5-tbb
|
||||
os: ubuntu-18.04
|
||||
- name: ubuntu-20.04-gcc-9-tbb
|
||||
os: ubuntu-20.04
|
||||
compiler: gcc
|
||||
version: "5"
|
||||
version: "9"
|
||||
flag: tbb
|
||||
|
||||
- name: ubuntu-20.04-clang-9
|
||||
os: ubuntu-20.04
|
||||
compiler: clang
|
||||
version: "9"
|
||||
|
||||
- name: macOS-11-xcode-13.4.1
|
||||
os: macOS-11
|
||||
compiler: xcode
|
||||
version: "13.4.1"
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
|
@ -79,10 +73,10 @@ jobs:
|
|||
gpg -a --export $LLVM_KEY | sudo apt-key add -
|
||||
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
|
||||
fi
|
||||
|
||||
sudo apt-get -y update
|
||||
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
|
||||
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev
|
||||
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
|
||||
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
|
@ -92,31 +86,37 @@ jobs:
|
|||
echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Install (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
brew tap ProfFan/robotics
|
||||
brew install cmake ninja
|
||||
brew install boost
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
brew install gcc@${{ matrix.version }}
|
||||
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
else
|
||||
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
fi
|
||||
sudo xcode-select -switch /Applications/Xcode.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
|
||||
- name: Set GTSAM_WITH_TBB Flag
|
||||
if: matrix.flag == 'tbb'
|
||||
run: |
|
||||
echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV
|
||||
echo "GTSAM Uses TBB"
|
||||
- name: Build (Linux)
|
||||
|
||||
- name: Set Swap Space
|
||||
if: runner.os == 'Linux'
|
||||
uses: pierotofy/set-swap-space@master
|
||||
with:
|
||||
swap-size-gb: 6
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
bash .github/scripts/python.sh
|
||||
- name: Build (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
bash .github/scripts/python.sh -d
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
bash .github/scripts/python.sh
|
||||
bash .github/scripts/python.sh -b
|
||||
|
||||
- name: Test
|
||||
run: |
|
||||
bash .github/scripts/python.sh -t
|
||||
|
|
|
@ -2,6 +2,12 @@ name: Special Cases CI
|
|||
|
||||
on: [pull_request]
|
||||
|
||||
# Every push to a PR immediately cancels the previous checks and starts a new one,
# so runners become available to other PRs more quickly.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: ${{ matrix.name }} ${{ matrix.build_type }}
|
||||
|
@ -12,7 +18,6 @@ jobs:
|
|||
CTEST_PARALLEL_LEVEL: 2
|
||||
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
|
||||
GTSAM_BUILD_UNSTABLE: ON
|
||||
BOOST_VERSION: 1.67.0
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
@ -22,61 +27,83 @@ jobs:
|
|||
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
|
||||
name:
|
||||
[
|
||||
ubuntu-gcc-deprecated,
|
||||
ubuntu-gcc-quaternions,
|
||||
ubuntu-gcc-tbb,
|
||||
ubuntu-cayleymap,
|
||||
ubuntu-clang-deprecated,
|
||||
ubuntu-clang-quaternions,
|
||||
ubuntu-clang-tbb,
|
||||
ubuntu-clang-cayleymap,
|
||||
ubuntu-clang-system-libs,
|
||||
ubuntu-no-boost,
|
||||
ubuntu-no-unstable,
|
||||
]
|
||||
|
||||
build_type: [Debug, Release]
|
||||
|
||||
include:
|
||||
- name: ubuntu-gcc-deprecated
|
||||
os: ubuntu-18.04
|
||||
compiler: gcc
|
||||
version: "9"
|
||||
- name: ubuntu-clang-deprecated
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
flag: deprecated
|
||||
|
||||
- name: ubuntu-gcc-quaternions
|
||||
os: ubuntu-18.04
|
||||
compiler: gcc
|
||||
version: "9"
|
||||
- name: ubuntu-clang-quaternions
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
flag: quaternions
|
||||
|
||||
- name: ubuntu-gcc-tbb
|
||||
os: ubuntu-18.04
|
||||
compiler: gcc
|
||||
version: "9"
|
||||
- name: ubuntu-clang-tbb
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
flag: tbb
|
||||
|
||||
- name: ubuntu-cayleymap
|
||||
os: ubuntu-18.04
|
||||
compiler: gcc
|
||||
version: "9"
|
||||
- name: ubuntu-clang-cayleymap
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
flag: cayley
|
||||
|
||||
- name: ubuntu-system-libs
|
||||
os: ubuntu-18.04
|
||||
compiler: gcc
|
||||
version: "9"
|
||||
flag: system-libs
|
||||
- name: ubuntu-clang-system-libs
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
flag: system
|
||||
|
||||
- name: ubuntu-no-boost
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
flag: no_boost
|
||||
|
||||
- name: ubuntu-no-unstable
|
||||
os: ubuntu-22.04
|
||||
compiler: clang
|
||||
version: "14"
|
||||
flag: no_unstable
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
|
||||
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
|
||||
gpg --keyserver keyserver.ubuntu.com --recv-key $LLVM_KEY || gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-key $LLVM_KEY
|
||||
gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add -
|
||||
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
|
||||
fi
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install software-properties-common
|
||||
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
|
||||
# LLVM (clang) 9/14 is not in 22.04 (jammy)'s repositories so we add the official LLVM repository.
|
||||
if [ "${{ matrix.compiler }}" = "clang" ]; then
|
||||
# (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool
|
||||
# ipv4 avoids potential timeouts because of crappy IPv6 infrastructure
|
||||
# 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
|
||||
# This key is not in the keystore by default for Ubuntu so we need to add it.
|
||||
LLVM_KEY=15CF4D18AF4F7421
|
||||
gpg --keyserver keyserver.ubuntu.com --recv-key $LLVM_KEY || gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-key $LLVM_KEY
|
||||
gpg -a --export $LLVM_KEY | sudo apt-key add -
|
||||
sudo add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main"
|
||||
fi
|
||||
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
|
||||
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
|
||||
|
@ -91,27 +118,21 @@ jobs:
|
|||
- name: Install Boost
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
bash .github/scripts/boost.sh
|
||||
sudo apt-get -y install libboost-all-dev
|
||||
|
||||
- name: Install (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
brew install cmake ninja boost
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
brew install gcc@${{ matrix.version }}
|
||||
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
|
||||
else
|
||||
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
fi
|
||||
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
|
||||
echo "CC=clang" >> $GITHUB_ENV
|
||||
echo "CXX=clang++" >> $GITHUB_ENV
|
||||
|
||||
- name: Set Allow Deprecated Flag
|
||||
if: matrix.flag == 'deprecated'
|
||||
run: |
|
||||
echo "GTSAM_ALLOW_DEPRECATED_SINCE_V42=ON" >> $GITHUB_ENV
|
||||
echo "Allow deprecated since version 4.1"
|
||||
echo "GTSAM_ALLOW_DEPRECATED_SINCE_V43=ON" >> $GITHUB_ENV
|
||||
echo "Allow deprecated since version 4.3"
|
||||
|
||||
- name: Set Use Quaternions Flag
|
||||
if: matrix.flag == 'quaternions'
|
||||
|
@ -135,8 +156,31 @@ jobs:
|
|||
- name: Use system versions of 3rd party libraries
|
||||
if: matrix.flag == 'system'
|
||||
run: |
|
||||
sudo apt-get install libeigen3-dev
|
||||
echo "GTSAM_USE_SYSTEM_EIGEN=ON" >> $GITHUB_ENV
|
||||
echo "GTSAM_USE_SYSTEM_METIS=ON" >> $GITHUB_ENV
|
||||
# TODO(dellaert): This does not work yet?
|
||||
# sudo apt-get install metis
|
||||
# echo "GTSAM_USE_SYSTEM_METIS=ON" >> $GITHUB_ENV
|
||||
|
||||
- name: Turn off boost
|
||||
if: matrix.flag == 'no_boost'
|
||||
run: |
|
||||
echo "GTSAM_ENABLE_BOOST_SERIALIZATION=OFF" >> $GITHUB_ENV
|
||||
echo "GTSAM_USE_BOOST_FEATURES=OFF" >> $GITHUB_ENV
|
||||
echo "GTSAM will not use BOOST"
|
||||
|
||||
- name: Turn off unstable
|
||||
if: matrix.flag == 'no_unstable'
|
||||
run: |
|
||||
echo "GTSAM_BUILD_UNSTABLE=OFF" >> $GITHUB_ENV
|
||||
echo "GTSAM 'unstable' will not be built."
|
||||
|
||||
- name: Set Swap Space
|
||||
if: runner.os == 'Linux'
|
||||
uses: pierotofy/set-swap-space@master
|
||||
with:
|
||||
swap-size-gb: 12
|
||||
|
||||
|
||||
- name: Build & Test
|
||||
run: |
|
||||
|
|
|
@ -2,6 +2,12 @@ name: Windows CI
|
|||
|
||||
on: [pull_request]
|
||||
|
||||
# Every push to a PR immediately cancels the previous checks and starts a new one,
# so runners become available to other PRs more quickly.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: ${{ matrix.name }} ${{ matrix.build_type }}
|
||||
|
@ -16,29 +22,20 @@ jobs:
|
|||
BOOST_EXE: boost_1_72_0-msvc-14.2
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
fail-fast: true
|
||||
matrix:
|
||||
# Github Actions requires a single row to be added to the build matrix.
|
||||
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
|
||||
name: [
|
||||
#TODO This build fails, need to understand why.
|
||||
# windows-2016-cl,
|
||||
windows-2019-cl,
|
||||
]
|
||||
|
||||
build_type: [
|
||||
Debug,
|
||||
#TODO(Varun) The release build takes over 2.5 hours, need to figure out why.
|
||||
# Release
|
||||
Release
|
||||
]
|
||||
build_unstable: [ON]
|
||||
include:
|
||||
#TODO This build fails, need to understand why.
|
||||
# - name: windows-2016-cl
|
||||
# os: windows-2016
|
||||
# compiler: cl
|
||||
# platform: 32
|
||||
|
||||
- name: windows-2019-cl
|
||||
os: windows-2019
|
||||
compiler: cl
|
||||
|
@ -48,7 +45,9 @@ jobs:
|
|||
- name: Install Dependencies
|
||||
shell: powershell
|
||||
run: |
|
||||
Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
|
||||
iwr -useb get.scoop.sh -outfile 'install_scoop.ps1'
|
||||
.\install_scoop.ps1 -RunAsAdmin
|
||||
|
||||
scoop install cmake --global # So we don't get issues with CMP0074 policy
|
||||
scoop install ninja --global
|
||||
|
||||
|
@ -92,7 +91,7 @@ jobs:
|
|||
echo "BOOST_ROOT=$BOOST_PATH" >> $env:GITHUB_ENV
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Configuration
|
||||
run: |
|
||||
|
@ -100,12 +99,67 @@ jobs:
|
|||
cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib"
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
# Since Visual Studio is a multi-generator, we need to use --config
|
||||
# https://stackoverflow.com/a/24470998/1236990
|
||||
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam
|
||||
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam_unstable
|
||||
cmake --build build -j 4 --config ${{ matrix.build_type }} --target wrap
|
||||
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base
|
||||
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable
|
||||
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.linear
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target gtsam
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target gtsam_unstable
|
||||
|
||||
# Target doesn't exist
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target wrap
|
||||
|
||||
- name: Test
|
||||
shell: bash
|
||||
run: |
|
||||
# Run GTSAM tests
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.base
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.basis
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.discrete
|
||||
|
||||
# Compilation error
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry
|
||||
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.inference
|
||||
|
||||
# Compile. Fail with exception
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.linear
|
||||
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.navigation
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.sam
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.sfm
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.symbolic
|
||||
|
||||
# Compile. Fail with exception
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.hybrid
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear
|
||||
|
||||
# Compilation error
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam
|
||||
|
||||
|
||||
# Run GTSAM_UNSTABLE tests
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.base_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.linear_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.discrete_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.dynamics_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear_unstable
|
||||
|
||||
# Compilation error
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam_unstable
|
||||
|
||||
# Compilation error
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.partition
|
||||
|
|
|
@ -0,0 +1,21 @@
# This triggers building of packages
name: Trigger Package Builds
on:
  push:
    branches:
      - develop
jobs:
  trigger-package-build:
    runs-on: ubuntu-latest
    steps:
      - name: Trigger Package Rebuild
        uses: actions/github-script@v6
        with:
          github-token: ${{ secrets.PACKAGING_REPO_ACCESS_TOKEN }}
          script: |
            await github.rest.actions.createWorkflowDispatch({
              owner: 'borglab-launchpad',
              repo: 'gtsam-packaging',
              workflow_id: 'main.yaml',
              ref: 'master'
            })

@ -17,3 +17,5 @@
# for QtCreator:
CMakeLists.txt.user*
xcode/
/Dockerfile
/python/gtsam/notebooks/.ipynb_checkpoints/ellipses-checkpoint.ipynb

@ -1,4 +1,3 @@
|
|||
project(GTSAM CXX C)
|
||||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
# new feature to Cmake Version > 2.8.12
|
||||
|
@ -9,9 +8,9 @@ endif()
|
|||
|
||||
# Set the version number for the library
|
||||
set (GTSAM_VERSION_MAJOR 4)
|
||||
set (GTSAM_VERSION_MINOR 2)
|
||||
set (GTSAM_VERSION_MINOR 3)
|
||||
set (GTSAM_VERSION_PATCH 0)
|
||||
set (GTSAM_PRERELEASE_VERSION "a5")
|
||||
set (GTSAM_PRERELEASE_VERSION "a0")
|
||||
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
|
||||
|
||||
if (${GTSAM_VERSION_PATCH} EQUAL 0)
|
||||
|
@ -19,6 +18,11 @@ if (${GTSAM_VERSION_PATCH} EQUAL 0)
|
|||
else()
|
||||
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}${GTSAM_PRERELEASE_VERSION}")
|
||||
endif()
|
||||
|
||||
project(GTSAM
|
||||
LANGUAGES CXX C
|
||||
VERSION "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
|
||||
|
||||
message(STATUS "GTSAM Version: ${GTSAM_VERSION_STRING}")
|
||||
|
||||
set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
|
||||
|
@ -28,6 +32,14 @@ set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})
|
|||
###############################################################################
|
||||
# Gather information, perform checks, set defaults
|
||||
|
||||
if(MSVC)
|
||||
set(MSVC_LINKER_FLAGS "/FORCE:MULTIPLE")
|
||||
set(CMAKE_EXE_LINKER_FLAGS ${MSVC_LINKER_FLAGS})
|
||||
set(CMAKE_MODULE_LINKER_FLAGS ${MSVC_LINKER_FLAGS})
|
||||
set(CMAKE_SHARED_LINKER_FLAGS ${MSVC_LINKER_FLAGS})
|
||||
set(CMAKE_STATIC_LINKER_FLAGS ${MSVC_LINKER_FLAGS})
|
||||
endif()
|
||||
|
||||
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
|
||||
include(GtsamMakeConfigFile)
|
||||
include(GNUInstallDirs)
|
||||
|
@ -46,8 +58,24 @@ endif()
|
|||
|
||||
include(cmake/HandleGeneralOptions.cmake) # CMake build options
|
||||
|
||||
# Libraries:
|
||||
include(cmake/HandleBoost.cmake) # Boost
|
||||
############### Decide on BOOST ######################################
|
||||
# Enable or disable serialization with GTSAM_ENABLE_BOOST_SERIALIZATION
|
||||
option(GTSAM_ENABLE_BOOST_SERIALIZATION "Enable Boost serialization" ON)
|
||||
if(GTSAM_ENABLE_BOOST_SERIALIZATION)
|
||||
add_definitions(-DGTSAM_ENABLE_BOOST_SERIALIZATION)
|
||||
endif()
|
||||
|
||||
option(GTSAM_USE_BOOST_FEATURES "Enable Features that use Boost" ON)
|
||||
if(GTSAM_USE_BOOST_FEATURES)
|
||||
add_definitions(-DGTSAM_USE_BOOST_FEATURES)
|
||||
endif()
|
||||
|
||||
if(GTSAM_ENABLE_BOOST_SERIALIZATION OR GTSAM_USE_BOOST_FEATURES)
|
||||
include(cmake/HandleBoost.cmake)
|
||||
endif()
|
||||
######################################################################
|
||||
|
||||
# Other Libraries:
|
||||
include(cmake/HandleCCache.cmake) # ccache
|
||||
include(cmake/HandleCPack.cmake) # CPack
|
||||
include(cmake/HandleEigen.cmake) # Eigen3
|
||||
|
@ -83,7 +111,7 @@ add_subdirectory(timing)
|
|||
|
||||
# Build gtsam_unstable
|
||||
if (GTSAM_BUILD_UNSTABLE)
|
||||
add_subdirectory(gtsam_unstable)
|
||||
add_subdirectory(gtsam_unstable)
|
||||
endif()
|
||||
|
||||
# This is the new wrapper
|
||||
|
@ -97,8 +125,6 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
|
|||
# Copy matlab.h to the correct folder.
|
||||
configure_file(${PROJECT_SOURCE_DIR}/wrap/matlab.h
|
||||
${PROJECT_BINARY_DIR}/wrap/matlab.h COPYONLY)
|
||||
# Add the include directories so that matlab.h can be found
|
||||
include_directories("${PROJECT_BINARY_DIR}" "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}")
|
||||
|
||||
add_subdirectory(wrap)
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake")
|
||||
|
|
|
@ -6,7 +6,6 @@ file(GLOB cppunitlite_src "*.cpp")
|
|||
add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers})
|
||||
list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite)
|
||||
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
|
||||
target_link_libraries(CppUnitLite PUBLIC Boost::boost) # boost/lexical_cast.h
|
||||
|
||||
gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure
|
||||
|
||||
|
|
|
@ -15,8 +15,6 @@
|
|||
#include "TestResult.h"
|
||||
#include "Failure.h"
|
||||
|
||||
#include <boost/lexical_cast.hpp>
|
||||
|
||||
Test::Test (const std::string& testName)
|
||||
: name_ (testName), next_(0), lineNumber_(-1), safeCheck_(true)
|
||||
{
|
||||
|
@ -47,10 +45,10 @@ bool Test::check(long expected, long actual, TestResult& result, const std::stri
|
|||
result.addFailure (
|
||||
Failure (
|
||||
name_,
|
||||
boost::lexical_cast<std::string> (__FILE__),
|
||||
std::string(__FILE__),
|
||||
__LINE__,
|
||||
boost::lexical_cast<std::string> (expected),
|
||||
boost::lexical_cast<std::string> (actual)));
|
||||
std::to_string(expected),
|
||||
std::to_string(actual)));
|
||||
|
||||
return false;
|
||||
|
||||
|
@ -64,7 +62,7 @@ bool Test::check(const std::string& expected, const std::string& actual, TestRes
|
|||
result.addFailure (
|
||||
Failure (
|
||||
name_,
|
||||
boost::lexical_cast<std::string> (__FILE__),
|
||||
std::string(__FILE__),
|
||||
__LINE__,
|
||||
expected,
|
||||
actual));
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
|
||||
|
||||
#include <cmath>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <string>
|
||||
|
||||
class TestResult;
|
||||
|
||||
|
@ -32,7 +32,7 @@ class Test
|
|||
public:
|
||||
Test (const std::string& testName);
|
||||
Test (const std::string& testName, const std::string& filename, long lineNumber, bool safeCheck);
|
||||
virtual ~Test() {};
|
||||
virtual ~Test() {}
|
||||
|
||||
virtual void run (TestResult& result) = 0;
|
||||
|
||||
|
@ -63,7 +63,6 @@ protected:
|
|||
#define TEST(testGroup, testName)\
|
||||
class testGroup##testName##Test : public Test \
|
||||
{ public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, true) {} \
|
||||
virtual ~testGroup##testName##Test () {};\
|
||||
void run (TestResult& result_) override;} \
|
||||
testGroup##testName##Instance; \
|
||||
void testGroup##testName##Test::run (TestResult& result_)
|
||||
|
@ -81,7 +80,7 @@ protected:
|
|||
#define TEST_UNSAFE(testGroup, testName)\
|
||||
class testGroup##testName##Test : public Test \
|
||||
{ public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, false) {} \
|
||||
virtual ~testGroup##testName##Test () {};\
|
||||
virtual ~testGroup##testName##Test () {} \
|
||||
void run (TestResult& result_) override;} \
|
||||
testGroup##testName##Instance; \
|
||||
void testGroup##testName##Test::run (TestResult& result_)
|
||||
|
@ -112,17 +111,17 @@ protected:
|
|||
|
||||
#define THROWS_EXCEPTION(condition)\
|
||||
{ try { condition; \
|
||||
result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + boost::lexical_cast<std::string>(#condition))); \
|
||||
result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + std::string(#condition))); \
|
||||
return; } \
|
||||
catch (...) {} }
|
||||
|
||||
#define CHECK_EXCEPTION(condition, exception_name)\
|
||||
{ try { condition; \
|
||||
result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + boost::lexical_cast<std::string>(#condition))); \
|
||||
result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + std::string(#condition))); \
|
||||
return; } \
|
||||
catch (exception_name&) {} \
|
||||
catch (...) { \
|
||||
result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Wrong exception: ") + boost::lexical_cast<std::string>(#condition) + boost::lexical_cast<std::string>(", expected: ") + boost::lexical_cast<std::string>(#exception_name))); \
|
||||
result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Wrong exception: ") + std::string(#condition) + std::string(", expected: ") + std::string(#exception_name))); \
|
||||
return; } }
|
||||
|
||||
#define EQUALITY(expected,actual)\
|
||||
|
@ -130,21 +129,21 @@ protected:
|
|||
result_.addFailure(Failure(name_, __FILE__, __LINE__, #expected, #actual)); }
|
||||
|
||||
#define CHECK_EQUAL(expected,actual)\
|
||||
{ if ((expected) == (actual)) return; result_.addFailure(Failure(name_, __FILE__, __LINE__, boost::lexical_cast<std::string>(expected), boost::lexical_cast<std::string>(actual))); }
|
||||
{ if ((expected) == (actual)) return; result_.addFailure(Failure(name_, __FILE__, __LINE__, std::to_string(expected), std::to_string(actual))); }
|
||||
|
||||
#define LONGS_EQUAL(expected,actual)\
|
||||
{ long actualTemp = actual; \
|
||||
long expectedTemp = expected; \
|
||||
if ((expectedTemp) != (actualTemp)) \
|
||||
{ result_.addFailure (Failure (name_, __FILE__, __LINE__, boost::lexical_cast<std::string>(expectedTemp), \
|
||||
boost::lexical_cast<std::string>(actualTemp))); return; } }
|
||||
{ result_.addFailure (Failure (name_, __FILE__, __LINE__, std::to_string(expectedTemp), \
|
||||
std::to_string(actualTemp))); return; } }
|
||||
|
||||
#define DOUBLES_EQUAL(expected,actual,threshold)\
|
||||
{ double actualTemp = actual; \
|
||||
double expectedTemp = expected; \
|
||||
if (!std::isfinite(actualTemp) || !std::isfinite(expectedTemp) || fabs ((expectedTemp)-(actualTemp)) > threshold) \
|
||||
{ result_.addFailure (Failure (name_, __FILE__, __LINE__, \
|
||||
boost::lexical_cast<std::string>((double)expectedTemp), boost::lexical_cast<std::string>((double)actualTemp))); return; } }
|
||||
std::to_string((double)expectedTemp), std::to_string((double)actualTemp))); return; } }
|
||||
|
||||
|
||||
/* EXPECTs: tests will continue running after a failure */
|
||||
|
@ -156,15 +155,15 @@ boost::lexical_cast<std::string>((double)expectedTemp), boost::lexical_cast<std:
|
|||
{ long actualTemp = actual; \
|
||||
long expectedTemp = expected; \
|
||||
if ((expectedTemp) != (actualTemp)) \
|
||||
{ result_.addFailure (Failure (name_, __FILE__, __LINE__, boost::lexical_cast<std::string>(expectedTemp), \
|
||||
boost::lexical_cast<std::string>(actualTemp))); } }
|
||||
{ result_.addFailure (Failure (name_, __FILE__, __LINE__, std::to_string(expectedTemp), \
|
||||
std::to_string(actualTemp))); } }
|
||||
|
||||
#define EXPECT_DOUBLES_EQUAL(expected,actual,threshold)\
|
||||
{ double actualTemp = actual; \
|
||||
double expectedTemp = expected; \
|
||||
if (!std::isfinite(actualTemp) || !std::isfinite(expectedTemp) || fabs ((expectedTemp)-(actualTemp)) > threshold) \
|
||||
{ result_.addFailure (Failure (name_, __FILE__, __LINE__, \
|
||||
boost::lexical_cast<std::string>((double)expectedTemp), boost::lexical_cast<std::string>((double)actualTemp))); } }
|
||||
std::to_string((double)expectedTemp), std::to_string((double)actualTemp))); } }
|
||||
|
||||
|
||||
#define FAIL(text) \
|
||||
|
|
|
@ -27,7 +27,7 @@ class TestResult
|
|||
{
|
||||
public:
|
||||
TestResult ();
|
||||
virtual ~TestResult() {};
|
||||
virtual ~TestResult() {}
|
||||
virtual void testsStarted ();
|
||||
virtual void addFailure (const Failure& failure);
|
||||
virtual void testsEnded ();
|
||||
|
|
|
@ -3,8 +3,8 @@
### Coding Conventions

* Classes are Uppercase, methods and functions lowerMixedCase.
* We use a modified K&R Style, with 2-space tabs, inserting spaces for tabs.
* Use meaningful variable names, e.g. `measurement` not `msm`.
* Apart from those naming conventions, we adopt Google C++ style.
* Use meaningful variable names, e.g. `measurement` not `msm`, avoid abbreviations (see the short sketch after this list).
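
To make these conventions concrete, here is a short, hypothetical sketch; the class and member names are invented for illustration and are not taken from the GTSAM sources:

```cpp
// Illustration only: Uppercase class name, lowerMixedCase method,
// 2-space indentation, and a meaningful variable name instead of `msm`.
class RangeSensor {
 public:
  double addMeasurement(double measurement) {
    lastMeasurement_ = measurement;
    return lastMeasurement_;
  }

 private:
  double lastMeasurement_ = 0.0;
};
```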


### Windows

@ -15,7 +15,7 @@ For example:
```cpp
class GTSAM_EXPORT MyClass { ... };

GTSAM_EXPORT myFunction();
GTSAM_EXPORT return_type myFunction();
```

More details [here](Using-GTSAM-EXPORT.md).

@ -166,7 +166,7 @@ Concept Checks

Boost provides a nice way to check whether a given type satisfies a concept. For example, the following

BOOST_CONCEPT_ASSERT(IsVectorSpace<Point2>)
GTSAM_CONCEPT_ASSERT(IsVectorSpace<Point2>)

asserts that Point2 indeed is a model for the VectorSpace concept.
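
As a usage sketch, such a check is typically placed at namespace scope in a header or unit test. The include paths below are assumptions made for this example, and the exact macro spelling may differ between GTSAM versions:

```cpp
// Sketch only: a compile-time concept check; the build fails if the type does
// not model the concept, and nothing is evaluated at runtime.
#include <gtsam/base/VectorSpace.h>   // assumed to provide IsVectorSpace
#include <gtsam/geometry/Point2.h>

GTSAM_CONCEPT_ASSERT(gtsam::IsVectorSpace<gtsam::Point2>);
```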
@ -13,7 +13,7 @@ $ make install
## Important Installation Notes

1. GTSAM requires the following libraries to be installed on your system:
- BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes).
- BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes) for version recommendations based on your compiler.

- CMake version 3.0 or higher
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher

@ -50,7 +50,7 @@ will run up to 10x faster in Release mode! See the end of this document for
additional debugging tips.

3. GTSAM has Doxygen documentation. To generate, run 'make doc' from your
build directory.
build directory after setting the `GTSAM_BUILD_DOCS` and `GTSAM_BUILD_[HTML|LATEX]` cmake flags.

4. The instructions below install the library to the default system install path and
build all components. From a terminal, starting in the root library folder,

@ -72,7 +72,7 @@ execute commands as follows for an out-of-source build:
Versions of Boost prior to 1.65 have a known bug that prevents proper "deep" serialization of objects, which means that objects encapsulated inside other objects don't get serialized.
This is particularly seen when using `clang` as the C++ compiler.

For this reason we require Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.
For this reason we recommend Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.
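
To illustrate what "deep" serialization involves, here is a generic Boost.Serialization sketch (not GTSAM code, and not a reproduction of the bug): serializing an outer object is only correct if the objects it contains are written out as well.

```cpp
// Generic illustration: Outer owns an Inner object, and serializing Outer
// must also, transitively, serialize Inner's data.
#include <boost/archive/text_oarchive.hpp>
#include <iostream>
#include <sstream>

struct Inner {
  double value = 1.0;
  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) { ar & value; }
};

struct Outer {
  Inner inner;  // encapsulated object that must be serialized "deeply"
  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) { ar & inner; }
};

int main() {
  std::ostringstream os;
  boost::archive::text_oarchive oa(os);
  const Outer outer;
  oa << outer;  // writes Outer and, through its serialize() member, Inner
  std::cout << os.str() << "\n";
  return 0;
}
```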

## Known Issues

README.md

@ -2,9 +2,9 @@

**Important Note**

As of Dec 2021, the `develop` branch is officially in "Pre 4.2" mode. A great new feature we will be adding in 4.2 is *hybrid inference* a la DCSLAM (Kevin Doherty et al) and we envision several API-breaking changes will happen in the discrete folder.
**As of January 2023, the `develop` branch is officially in "Pre 4.3" mode. We envision several API-breaking changes as we switch to C++17 and away from boost.**

In addition, features deprecated in 4.1 will be removed. Please use the last [4.1.1 release](https://github.com/borglab/gtsam/releases/tag/4.1.1) if you need those features. However, most (not all, unfortunately) are easily converted and can be tracked down (in 4.1.1) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42`.
In addition, features deprecated in 4.2 will be removed. Please use the last [4.2a8 release](https://github.com/borglab/gtsam/releases/tag/4.2a8) if you need those features. However, most are easily converted and can be tracked down (in 4.2) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42`.

## What is GTSAM?

@ -31,11 +31,11 @@ In the root library folder execute:

```sh
#!bash
$ mkdir build
$ cd build
$ cmake ..
$ make check (optional, runs unit tests)
$ make install
mkdir build
cd build
cmake ..
make check (optional, runs unit tests)
make install
```

Prerequisites:

@ -55,15 +55,48 @@ Optional prerequisites - used automatically if findable by CMake:

GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect.

GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4` to use the deprecated methods.

GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42` for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile.


## Wrappers

We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md) wrappers for GTSAM. Please refer to the linked documents for more details.

## Citation
|
||||
|
||||
If you are using GTSAM for academic work, please use the following citation:
|
||||
|
||||
```bibtex
|
||||
@software{gtsam,
|
||||
author = {Frank Dellaert and GTSAM Contributors},
|
||||
title = {borglab/gtsam},
|
||||
month = May,
|
||||
year = 2022,
|
||||
publisher = {Georgia Tech Borg Lab},
|
||||
version = {4.2a8},
|
||||
doi = {10.5281/zenodo.5794541},
|
||||
url = {https://github.com/borglab/gtsam)}}
|
||||
}
|
||||
```
|
||||
|
||||
To cite the `Factor Graphs for Robot Perception` book, please use:
|
||||
```bibtex
|
||||
@book{factor_graphs_for_robot_perception,
|
||||
author={Frank Dellaert and Michael Kaess},
|
||||
year={2017},
|
||||
title={Factor Graphs for Robot Perception},
|
||||
publisher={Foundations and Trends in Robotics, Vol. 6},
|
||||
url={http://www.cs.cmu.edu/~kaess/pub/Dellaert17fnt.pdf}
|
||||
}
|
||||
```
|
||||
|
||||
If you are using the IMU preintegration scheme, please cite:
|
||||
```bibtex
|
||||
@book{imu_preintegration,
|
||||
author={Christian Forster and Luca Carlone and Frank Dellaert and Davide Scaramuzza},
|
||||
title={IMU preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation},
|
||||
year={2015}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## The Preintegrated IMU Factor
|
||||
|
||||
GTSAM includes a state-of-the-art IMU handling scheme based on
|
||||
|
@ -73,7 +106,7 @@ GTSAM includes a state of the art IMU handling scheme based on
|
|||
Our implementation improves on this using integration on the manifold, as detailed in
|
||||
|
||||
- Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, _"Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors"_, Int. Conf. on Robotics and Automation (ICRA), 2014. [[link]](https://ieeexplore.ieee.org/abstract/document/6907483)
|
||||
- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, "IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation", Robotics: Science and Systems (RSS), 2015. [[link]](http://www.roboticsproceedings.org/rss11/p06.pdf)
|
||||
- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, _"IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation"_, Robotics: Science and Systems (RSS), 2015. [[link]](http://www.roboticsproceedings.org/rss11/p06.pdf)
|
||||
|
||||
If you are using the factor in academic work, please cite the publications above.
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ To create a DLL in windows, the `GTSAM_EXPORT` keyword has been created and need
|
|||
* At least one of the functions inside that class is declared in a .cpp file and not just the .h file.
* You can `GTSAM_EXPORT` any class it inherits from as well. (Note that this implicitly requires the class does not derive from a "header-only" class. Note that Eigen is a "header-only" library, so if your class derives from Eigen, _do not_ use `GTSAM_EXPORT` in the class definition!)
3. If you have defined a class using `GTSAM_EXPORT`, do not use `GTSAM_EXPORT` in any of its individual function declarations. (Note that you _can_ put `GTSAM_EXPORT` in the definition of individual functions within a class as long as you don't put `GTSAM_EXPORT` in the class definition.)
4. For template specializations, you need to add `GTSAM_EXPORT` to each individual specialization, as sketched below.
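
The following hypothetical sketch (class and function names are invented for illustration, assuming the `GTSAM_EXPORT` macro from the library's dllexport header) shows these rules together: the class is exported once, its member functions are not marked individually, and an explicit template specialization carries its own `GTSAM_EXPORT`:

```cpp
// Illustration only; not actual GTSAM declarations.
class GTSAM_EXPORT Calibration {
 public:
  double focalLength() const;  // no GTSAM_EXPORT on individual members (rule 3)
};

template <typename T>
T convert(const T& value);

// Each explicit specialization is exported individually (rule 4).
template <>
GTSAM_EXPORT double convert<double>(const double& value);
```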
## When is GTSAM_EXPORT being used incorrectly
|
||||
Unfortunately, using `GTSAM_EXPORT` incorrectly often does not cause a compiler or linker error in the library that is being compiled, but only when you try to use that DLL in a different library. For example, an error in `gtsam/base` will often show up when compiling the `check_base_program` or the MATLAB wrapper, but not when compiling/linking gtsam itself. The most common errors will say something like:
|
||||
|
|
|
@ -21,6 +21,14 @@ else()
|
|||
find_dependency(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@)
|
||||
endif()
|
||||
|
||||
if(@GTSAM_USE_TBB@)
|
||||
find_dependency(TBB 4.4 COMPONENTS tbb tbbmalloc)
|
||||
endif()
|
||||
|
||||
if(@GTSAM_USE_SYSTEM_EIGEN@)
|
||||
find_dependency(Eigen3 REQUIRED)
|
||||
endif()
|
||||
|
||||
# Load exports
|
||||
include(${OUR_CMAKE_DIR}/@PACKAGE_NAME@-exports.cmake)
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,81 +0,0 @@
|
|||
# - Try to find Eigen3 lib
|
||||
#
|
||||
# This module supports requiring a minimum version, e.g. you can do
|
||||
# find_package(Eigen3 3.1.2)
|
||||
# to require version 3.1.2 or newer of Eigen3.
|
||||
#
|
||||
# Once done this will define
|
||||
#
|
||||
# EIGEN3_FOUND - system has eigen lib with correct version
|
||||
# EIGEN3_INCLUDE_DIR - the eigen include directory
|
||||
# EIGEN3_VERSION - eigen version
|
||||
|
||||
# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>
|
||||
# Copyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>
|
||||
# Copyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
# Redistribution and use is allowed according to the terms of the 2-clause BSD license.
|
||||
|
||||
if(NOT Eigen3_FIND_VERSION)
|
||||
if(NOT Eigen3_FIND_VERSION_MAJOR)
|
||||
set(Eigen3_FIND_VERSION_MAJOR 2)
|
||||
endif(NOT Eigen3_FIND_VERSION_MAJOR)
|
||||
if(NOT Eigen3_FIND_VERSION_MINOR)
|
||||
set(Eigen3_FIND_VERSION_MINOR 91)
|
||||
endif(NOT Eigen3_FIND_VERSION_MINOR)
|
||||
if(NOT Eigen3_FIND_VERSION_PATCH)
|
||||
set(Eigen3_FIND_VERSION_PATCH 0)
|
||||
endif(NOT Eigen3_FIND_VERSION_PATCH)
|
||||
|
||||
set(Eigen3_FIND_VERSION "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}")
|
||||
endif(NOT Eigen3_FIND_VERSION)
|
||||
|
||||
macro(_eigen3_check_version)
|
||||
file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header)
|
||||
|
||||
string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match "${_eigen3_version_header}")
|
||||
set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}")
|
||||
string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match "${_eigen3_version_header}")
|
||||
set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}")
|
||||
string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match "${_eigen3_version_header}")
|
||||
set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}")
|
||||
|
||||
set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION})
|
||||
if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
|
||||
set(EIGEN3_VERSION_OK FALSE)
|
||||
else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
|
||||
set(EIGEN3_VERSION_OK TRUE)
|
||||
endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
|
||||
|
||||
if(NOT EIGEN3_VERSION_OK)
|
||||
|
||||
message(STATUS "Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, "
|
||||
"but at least version ${Eigen3_FIND_VERSION} is required")
|
||||
endif(NOT EIGEN3_VERSION_OK)
|
||||
endmacro(_eigen3_check_version)
|
||||
|
||||
if (EIGEN3_INCLUDE_DIR)
|
||||
|
||||
# in cache already
|
||||
_eigen3_check_version()
|
||||
set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})
|
||||
|
||||
else (EIGEN3_INCLUDE_DIR)
|
||||
|
||||
find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library
|
||||
PATHS
|
||||
${CMAKE_INSTALL_PREFIX}/include
|
||||
${KDE4_INCLUDE_DIR}
|
||||
PATH_SUFFIXES eigen3 eigen
|
||||
)
|
||||
|
||||
if(EIGEN3_INCLUDE_DIR)
|
||||
_eigen3_check_version()
|
||||
endif(EIGEN3_INCLUDE_DIR)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK)
|
||||
|
||||
mark_as_advanced(EIGEN3_INCLUDE_DIR)
|
||||
|
||||
endif(EIGEN3_INCLUDE_DIR)
|
||||
|
|
@ -1,323 +0,0 @@
|
|||
# The MIT License (MIT)
|
||||
#
|
||||
# Copyright (c) 2015 Justus Calvin
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
#
|
||||
# FindTBB
|
||||
# -------
|
||||
#
|
||||
# Find TBB include directories and libraries.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# find_package(TBB [major[.minor]] [EXACT]
|
||||
# [QUIET] [REQUIRED]
|
||||
# [[COMPONENTS] [components...]]
|
||||
# [OPTIONAL_COMPONENTS components...])
|
||||
#
|
||||
# where the allowed components are tbbmalloc and tbb_preview. Users may modify
|
||||
# the behavior of this module with the following variables:
|
||||
#
|
||||
# * TBB_ROOT_DIR - The base directory the of TBB installation.
|
||||
# * TBB_INCLUDE_DIR - The directory that contains the TBB headers files.
|
||||
# * TBB_LIBRARY - The directory that contains the TBB library files.
|
||||
# * TBB_<library>_LIBRARY - The path of the TBB the corresponding TBB library.
|
||||
# These libraries, if specified, override the
|
||||
# corresponding library search results, where <library>
|
||||
# may be tbb, tbb_debug, tbbmalloc, tbbmalloc_debug,
|
||||
# tbb_preview, or tbb_preview_debug.
|
||||
# * TBB_USE_DEBUG_BUILD - The debug version of tbb libraries, if present, will
|
||||
# be used instead of the release version.
|
||||
#
|
||||
# Users may modify the behavior of this module with the following environment
|
||||
# variables:
|
||||
#
|
||||
# * TBB_INSTALL_DIR
|
||||
# * TBBROOT
|
||||
# * LIBRARY_PATH
|
||||
#
|
||||
# This module will set the following variables:
|
||||
#
|
||||
# * TBB_FOUND - Set to false, or undefined, if we haven’t found, or
|
||||
# don’t want to use TBB.
|
||||
# * TBB_<component>_FOUND         - If False, optional <component> part of TBB system is
|
||||
# not available.
|
||||
# * TBB_VERSION - The full version string
|
||||
# * TBB_VERSION_MAJOR - The major version
|
||||
# * TBB_VERSION_MINOR - The minor version
|
||||
# * TBB_INTERFACE_VERSION - The interface version number defined in
|
||||
# tbb/tbb_stddef.h.
|
||||
# * TBB_<library>_LIBRARY_RELEASE - The path of the TBB release version of
|
||||
# <library>, where <library> may be tbb, tbb_debug,
|
||||
# tbbmalloc, tbbmalloc_debug, tbb_preview, or
|
||||
# tbb_preview_debug.
|
||||
# * TBB_<library>_LIBRARY_DEBUG   - The path of the TBB debug version of
|
||||
# <library>, where <library> may be tbb, tbb_debug,
|
||||
# tbbmalloc, tbbmalloc_debug, tbb_preview, or
|
||||
# tbb_preview_debug.
|
||||
#
|
||||
# The following variables should be used to build and link with TBB:
|
||||
#
|
||||
# * TBB_INCLUDE_DIRS - The include directory for TBB.
|
||||
# * TBB_LIBRARIES - The libraries to link against to use TBB.
|
||||
# * TBB_LIBRARIES_RELEASE - The release libraries to link against to use TBB.
|
||||
# * TBB_LIBRARIES_DEBUG - The debug libraries to link against to use TBB.
|
||||
# * TBB_DEFINITIONS - Definitions to use when compiling code that uses
|
||||
# TBB.
|
||||
# * TBB_DEFINITIONS_RELEASE - Definitions to use when compiling release code that
|
||||
# uses TBB.
|
||||
# * TBB_DEFINITIONS_DEBUG - Definitions to use when compiling debug code that
|
||||
# uses TBB.
|
||||
#
|
||||
# This module will also create the "tbb" target that may be used when building
|
||||
# executables and libraries.
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
|
||||
if(NOT TBB_FOUND)
|
||||
|
||||
##################################
|
||||
# Check the build type
|
||||
##################################
|
||||
|
||||
if(NOT DEFINED TBB_USE_DEBUG_BUILD)
|
||||
# Set build type to RELEASE by default for optimization.
|
||||
set(TBB_BUILD_TYPE RELEASE)
|
||||
elseif(TBB_USE_DEBUG_BUILD)
|
||||
set(TBB_BUILD_TYPE DEBUG)
|
||||
else()
|
||||
set(TBB_BUILD_TYPE RELEASE)
|
||||
endif()
|
||||
|
||||
##################################
|
||||
# Set the TBB search directories
|
||||
##################################
|
||||
|
||||
# Define search paths based on user input and environment variables
|
||||
set(TBB_SEARCH_DIR ${TBB_ROOT_DIR} $ENV{TBB_INSTALL_DIR} $ENV{TBBROOT})
|
||||
|
||||
# Define the search directories based on the current platform
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
|
||||
set(TBB_DEFAULT_SEARCH_DIR "C:/Program Files/Intel/TBB"
|
||||
"C:/Program Files (x86)/Intel/TBB")
|
||||
|
||||
# Set the target architecture
|
||||
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
set(TBB_ARCHITECTURE "intel64")
|
||||
else()
|
||||
set(TBB_ARCHITECTURE "ia32")
|
||||
endif()
|
||||
|
||||
# Set the TBB search library path search suffix based on the version of VC
|
||||
if(WINDOWS_STORE)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11_ui")
|
||||
elseif(MSVC14)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc14")
|
||||
elseif(MSVC12)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc12")
|
||||
elseif(MSVC11)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11")
|
||||
elseif(MSVC10)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc10")
|
||||
endif()
|
||||
|
||||
# Add the library path search suffix for the VC independent version of TBB
|
||||
list(APPEND TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc_mt")
|
||||
|
||||
elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
|
||||
# OS X
|
||||
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb"
|
||||
"/usr/local/opt/tbb")
|
||||
|
||||
# TODO: Check to see which C++ library is being used by the compiler.
|
||||
if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0)
|
||||
# The default C++ library on OS X 10.9 and later is libc++
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/libc++" "lib")
|
||||
else()
|
||||
set(TBB_LIB_PATH_SUFFIX "lib")
|
||||
endif()
|
||||
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
# Linux
|
||||
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb")
|
||||
|
||||
# TODO: Check compiler version to see whether the suffix should be <arch>/gcc4.1 or
|
||||
# <arch>/gcc4.4. For now, assume that the compiler is
|
||||
# gcc 4.4.x or later.
|
||||
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/intel64/gcc4.4")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$")
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/ia32/gcc4.4")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
##################################
|
||||
# Find the TBB include dir
|
||||
##################################
|
||||
|
||||
find_path(TBB_INCLUDE_DIRS tbb/tbb.h
|
||||
HINTS ${TBB_INCLUDE_DIR} ${TBB_SEARCH_DIR}
|
||||
PATHS ${TBB_DEFAULT_SEARCH_DIR}
|
||||
PATH_SUFFIXES include)
|
||||
|
||||
##################################
|
||||
# Set version strings
|
||||
##################################
|
||||
|
||||
if(TBB_INCLUDE_DIRS)
|
||||
set(_tbb_version_file_prior_to_tbb_2021_1 "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h")
|
||||
set(_tbb_version_file_after_tbb_2021_1 "${TBB_INCLUDE_DIRS}/oneapi/tbb/version.h")
|
||||
|
||||
if (EXISTS "${_tbb_version_file_prior_to_tbb_2021_1}")
|
||||
file(READ "${_tbb_version_file_prior_to_tbb_2021_1}" _tbb_version_file )
|
||||
elseif (EXISTS "${_tbb_version_file_after_tbb_2021_1}")
|
||||
file(READ "${_tbb_version_file_after_tbb_2021_1}" _tbb_version_file )
|
||||
else()
|
||||
message(FATAL_ERROR "Found TBB installation: ${TBB_INCLUDE_DIRS} "
|
||||
"missing version header.")
|
||||
endif()
|
||||
|
||||
string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
|
||||
TBB_VERSION_MAJOR "${_tbb_version_file}")
|
||||
string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1"
|
||||
TBB_VERSION_MINOR "${_tbb_version_file}")
|
||||
string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1"
|
||||
TBB_INTERFACE_VERSION "${_tbb_version_file}")
|
||||
set(TBB_VERSION "${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}")
|
||||
endif()
|
||||
|
||||
##################################
|
||||
# Find TBB components
|
||||
##################################
|
||||
|
||||
if(TBB_VERSION VERSION_LESS 4.3)
|
||||
set(TBB_SEARCH_COMPOMPONENTS tbb_preview tbbmalloc tbb)
|
||||
else()
|
||||
set(TBB_SEARCH_COMPOMPONENTS tbb_preview tbbmalloc_proxy tbbmalloc tbb)
|
||||
endif()
|
||||
|
||||
# Find each component
|
||||
foreach(_comp ${TBB_SEARCH_COMPOMPONENTS})
|
||||
if(";${TBB_FIND_COMPONENTS};tbb;" MATCHES ";${_comp};")
|
||||
|
||||
# Search for the libraries
|
||||
find_library(TBB_${_comp}_LIBRARY_RELEASE ${_comp}
|
||||
HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR}
|
||||
PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH
|
||||
PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX})
|
||||
|
||||
find_library(TBB_${_comp}_LIBRARY_DEBUG ${_comp}_debug
|
||||
HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR}
|
||||
PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH
|
||||
PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX})
|
||||
|
||||
if(TBB_${_comp}_LIBRARY_DEBUG)
|
||||
list(APPEND TBB_LIBRARIES_DEBUG "${TBB_${_comp}_LIBRARY_DEBUG}")
|
||||
endif()
|
||||
if(TBB_${_comp}_LIBRARY_RELEASE)
|
||||
list(APPEND TBB_LIBRARIES_RELEASE "${TBB_${_comp}_LIBRARY_RELEASE}")
|
||||
endif()
|
||||
if(TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE} AND NOT TBB_${_comp}_LIBRARY)
|
||||
set(TBB_${_comp}_LIBRARY "${TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE}}")
|
||||
endif()
|
||||
|
||||
if(TBB_${_comp}_LIBRARY AND EXISTS "${TBB_${_comp}_LIBRARY}")
|
||||
set(TBB_${_comp}_FOUND TRUE)
|
||||
else()
|
||||
set(TBB_${_comp}_FOUND FALSE)
|
||||
endif()
|
||||
|
||||
# Mark internal variables as advanced
|
||||
mark_as_advanced(TBB_${_comp}_LIBRARY_RELEASE)
|
||||
mark_as_advanced(TBB_${_comp}_LIBRARY_DEBUG)
|
||||
mark_as_advanced(TBB_${_comp}_LIBRARY)
|
||||
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
##################################
|
||||
# Set compile flags and libraries
|
||||
##################################
|
||||
|
||||
set(TBB_DEFINITIONS_RELEASE "")
|
||||
set(TBB_DEFINITIONS_DEBUG "-DTBB_USE_DEBUG=1")
|
||||
|
||||
if(TBB_LIBRARIES_${TBB_BUILD_TYPE})
|
||||
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_${TBB_BUILD_TYPE}}")
|
||||
set(TBB_LIBRARIES "${TBB_LIBRARIES_${TBB_BUILD_TYPE}}")
|
||||
elseif(TBB_LIBRARIES_RELEASE)
|
||||
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_RELEASE}")
|
||||
set(TBB_LIBRARIES "${TBB_LIBRARIES_RELEASE}")
|
||||
elseif(TBB_LIBRARIES_DEBUG)
|
||||
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}")
|
||||
set(TBB_LIBRARIES "${TBB_LIBRARIES_DEBUG}")
|
||||
endif()
|
||||
|
||||
find_package_handle_standard_args(TBB
|
||||
REQUIRED_VARS TBB_INCLUDE_DIRS TBB_LIBRARIES
|
||||
HANDLE_COMPONENTS
|
||||
VERSION_VAR TBB_VERSION)
|
||||
|
||||
##################################
|
||||
# Create targets
|
||||
##################################
|
||||
|
||||
if(NOT CMAKE_VERSION VERSION_LESS 3.0 AND TBB_FOUND)
|
||||
# Start fix to support different targets for tbb, tbbmalloc, etc.
|
||||
# (Jose Luis Blanco, Jan 2019)
|
||||
# Iterate over tbb, tbbmalloc, etc.
|
||||
foreach(libname ${TBB_SEARCH_COMPOMPONENTS})
|
||||
if ((NOT TBB_${libname}_LIBRARY_RELEASE) AND (NOT TBB_${libname}_LIBRARY_DEBUG))
|
||||
continue()
|
||||
endif()
|
||||
|
||||
add_library(${libname} SHARED IMPORTED)
|
||||
|
||||
set_target_properties(${libname} PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${TBB_INCLUDE_DIRS}
|
||||
IMPORTED_LOCATION ${TBB_${libname}_LIBRARY_RELEASE})
|
||||
if(TBB_${libname}_LIBRARY_RELEASE AND TBB_${libname}_LIBRARY_DEBUG)
|
||||
set_target_properties(${libname} PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS "$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:TBB_USE_DEBUG=1>"
|
||||
IMPORTED_LOCATION_DEBUG ${TBB_${libname}_LIBRARY_DEBUG}
|
||||
IMPORTED_LOCATION_RELWITHDEBINFO ${TBB_${libname}_LIBRARY_DEBUG}
|
||||
IMPORTED_LOCATION_RELEASE ${TBB_${libname}_LIBRARY_RELEASE}
|
||||
IMPORTED_LOCATION_MINSIZEREL ${TBB_${libname}_LIBRARY_RELEASE}
|
||||
)
|
||||
elseif(TBB_${libname}_LIBRARY_RELEASE)
|
||||
set_target_properties(${libname} PROPERTIES IMPORTED_LOCATION ${TBB_${libname}_LIBRARY_RELEASE})
|
||||
else()
|
||||
set_target_properties(${libname} PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}"
|
||||
IMPORTED_LOCATION ${TBB_${libname}_LIBRARY_DEBUG}
|
||||
)
|
||||
endif()
|
||||
endforeach()
|
||||
# End of fix to support different targets
|
||||
endif()
|
||||
|
||||
mark_as_advanced(TBB_INCLUDE_DIRS TBB_LIBRARIES)
|
||||
|
||||
unset(TBB_ARCHITECTURE)
|
||||
unset(TBB_BUILD_TYPE)
|
||||
unset(TBB_LIB_PATH_SUFFIX)
|
||||
unset(TBB_DEFAULT_SEARCH_DIR)
|
||||
|
||||
endif()
|
|
@ -1,6 +1,6 @@
|
|||
include(CheckCXXCompilerFlag) # for check_cxx_compiler_flag()
|
||||
|
||||
# Set cmake policy to recognize the AppleClang compiler
|
||||
# Set cmake policy to recognize the Apple Clang compiler
|
||||
# independently from the Clang compiler.
|
||||
if(POLICY CMP0025)
|
||||
cmake_policy(SET CMP0025 NEW)
|
||||
|
@ -87,12 +87,19 @@ if(MSVC)
|
|||
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE
|
||||
WINDOWS_LEAN_AND_MEAN
|
||||
NOMINMAX
|
||||
)
|
||||
)
|
||||
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC
|
||||
_ENABLE_EXTENDED_ALIGNED_STORAGE
|
||||
)
|
||||
# Avoid literally hundreds to thousands of warnings:
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC
|
||||
/wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data
|
||||
/wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data
|
||||
)
|
||||
|
||||
add_compile_options(/wd4005)
|
||||
add_compile_options(/wd4101)
|
||||
add_compile_options(/wd4834)
|
||||
|
||||
endif()
|
||||
|
||||
# Other (non-preprocessor macros) compiler flags:
|
||||
|
@ -122,6 +129,8 @@ else()
|
|||
-fPIC # ensure proper code generation for shared libraries
|
||||
$<$<CXX_COMPILER_ID:GNU>:-Wreturn-local-addr -Werror=return-local-addr> # Error: return local address
|
||||
$<$<CXX_COMPILER_ID:Clang>:-Wreturn-stack-address -Werror=return-stack-address> # Error: return local address
|
||||
$<$<CXX_COMPILER_ID:Clang>:-Wno-weak-template-vtables> # TODO(dellaert): don't know how to resolve
|
||||
$<$<CXX_COMPILER_ID:Clang>:-Wno-weak-vtables> # TODO(dellaert): don't know how to resolve
|
||||
-Wreturn-type -Werror=return-type # Error on missing return()
|
||||
-Wformat -Werror=format-security # Error on wrong printf() arguments
|
||||
$<$<COMPILE_LANGUAGE:CXX>:${flag_override_}> # Enforce the use of the override keyword
|
||||
|
@ -134,16 +143,19 @@ else()
|
|||
set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING -g -O3 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
|
||||
endif()
|
||||
|
||||
# Enable C++11:
|
||||
# Enable C++17:
|
||||
if (NOT CMAKE_VERSION VERSION_LESS 3.8)
|
||||
set(GTSAM_COMPILE_FEATURES_PUBLIC "cxx_std_11" CACHE STRING "CMake compile features property for all gtsam targets.")
|
||||
set(GTSAM_COMPILE_FEATURES_PUBLIC "cxx_std_17" CACHE STRING "CMake compile features property for all gtsam targets.")
|
||||
# See: https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
|
||||
# This is to enable -std=c++17 instead of -std=gnu++17
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
if (MSVC)
|
||||
# NOTE(jlblanco): seems to be required in addition to the cxx_std_17 above?
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC /std:c++17)
|
||||
endif()
|
||||
else()
|
||||
# Old cmake versions:
|
||||
if (NOT MSVC)
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-std=c++11>)
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-std=c++17>)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
@ -179,19 +191,43 @@ set(CMAKE_EXE_LINKER_FLAGS_PROFILING ${GTSAM_CMAKE_EXE_LINKER_FLAGS_PROFILING})
|
|||
|
||||
# Clang uses a template depth that is less than standard and is too small
|
||||
if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
|
||||
# Apple Clang before 5.0 does not support -ftemplate-depth.
|
||||
if(NOT (APPLE AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "5.0"))
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-ftemplate-depth=1024")
|
||||
endif()
|
||||
# Apple Clang before 5.0 does not support -ftemplate-depth.
|
||||
if(NOT (APPLE AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "5.0"))
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-ftemplate-depth=1024")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (NOT MSVC)
|
||||
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" ON)
|
||||
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF)
|
||||
if(GTSAM_BUILD_WITH_MARCH_NATIVE)
|
||||
# Add as public flag so all dependent projects also use it, as required
|
||||
# by Eigen to avoid crashes due to SIMD vectorization:
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
|
||||
endif()
|
||||
# Check if Apple OS and compiler is [Apple]Clang
|
||||
if(APPLE AND (${CMAKE_CXX_COMPILER_ID} MATCHES "^(Apple)?Clang$"))
|
||||
# Check Clang version since march=native is only supported for version 15.0+.
|
||||
if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "15.0")
|
||||
if(NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
|
||||
# Add as public flag so all dependent projects also use it, as required
|
||||
# by Eigen to avoid crashes due to SIMD vectorization:
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
|
||||
else()
|
||||
message(WARNING "Option GTSAM_BUILD_WITH_MARCH_NATIVE ignored, because native architecture is not supported for Apple silicon and AppleClang version < 15.0.")
|
||||
endif() # CMAKE_SYSTEM_PROCESSOR
|
||||
else()
|
||||
# Add as public flag so all dependent projects also use it, as required
|
||||
# by Eigen to avoid crashes due to SIMD vectorization:
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
|
||||
endif() # CMAKE_CXX_COMPILER_VERSION
|
||||
else()
|
||||
include(CheckCXXCompilerFlag)
|
||||
CHECK_CXX_COMPILER_FLAG("-march=native" COMPILER_SUPPORTS_MARCH_NATIVE)
|
||||
if(COMPILER_SUPPORTS_MARCH_NATIVE)
|
||||
# Add as public flag so all dependent projects also use it, as required
|
||||
# by Eigen to avoid crashes due to SIMD vectorization:
|
||||
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
|
||||
else()
|
||||
message(WARNING "Option GTSAM_BUILD_WITH_MARCH_NATIVE ignored, because native architecture is not supported.")
|
||||
endif() # COMPILER_SUPPORTS_MARCH_NATIVE
|
||||
endif() # APPLE
|
||||
endif() # GTSAM_BUILD_WITH_MARCH_NATIVE
|
||||
endif()
|
||||
|
||||
# Set up build type library postfixes
|
||||
|
|
|
@ -51,11 +51,10 @@ function(print_build_options_for_target target_name_)
|
|||
# print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE)
|
||||
print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC)
|
||||
|
||||
foreach(build_type ${GTSAM_CMAKE_CONFIGURATION_TYPES})
|
||||
string(TOUPPER "${build_type}" build_type_toupper)
|
||||
# print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper})
|
||||
print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC_${build_type_toupper})
|
||||
# print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper})
|
||||
print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC_${build_type_toupper})
|
||||
endforeach()
|
||||
string(TOUPPER "${CMAKE_BUILD_TYPE}" build_type_toupper)
|
||||
# print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper})
|
||||
print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC_${build_type_toupper})
|
||||
# print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper})
|
||||
print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC_${build_type_toupper})
|
||||
|
||||
endfunction()
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
###############################################################################
|
||||
# Option for using system Eigen or GTSAM-bundled Eigen
|
||||
|
||||
option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF)
|
||||
|
||||
if(NOT GTSAM_USE_SYSTEM_EIGEN)
|
||||
|
@ -11,10 +10,14 @@ endif()
|
|||
|
||||
# Switch for using system Eigen or GTSAM-bundled Eigen
|
||||
if(GTSAM_USE_SYSTEM_EIGEN)
|
||||
find_package(Eigen3 REQUIRED)
|
||||
# Since Eigen 3.3.0 an Eigen3Config.cmake is available, so use it.
|
||||
find_package(Eigen3 CONFIG REQUIRED) # need to find again as REQUIRED
|
||||
|
||||
# Use generic Eigen include paths e.g. <Eigen/Core>
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
|
||||
# The actual include directory (for BUILD cmake target interface):
|
||||
# Note: EIGEN3_INCLUDE_DIR points to some random location on some eigen
|
||||
# versions. So here I use the target itself to get the proper include
|
||||
# directory (it is generated by cmake, thus has the correct path)
|
||||
get_target_property(GTSAM_EIGEN_INCLUDE_FOR_BUILD Eigen3::Eigen INTERFACE_INCLUDE_DIRECTORIES)
|
||||
|
||||
# check if MKL is also enabled - can have one or the other, but not both!
|
||||
# Note: Eigen >= v3.2.5 includes our patches
|
||||
|
@ -27,9 +30,6 @@ if(GTSAM_USE_SYSTEM_EIGEN)
|
|||
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4))
|
||||
message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
|
||||
endif()
|
||||
|
||||
# The actual include directory (for BUILD cmake target interface):
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
|
||||
else()
|
||||
# Use bundled Eigen include path.
|
||||
# Clear any variables set by FindEigen3
|
||||
|
@ -42,7 +42,20 @@ else()
|
|||
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")
|
||||
|
||||
# The actual include directory (for BUILD cmake target interface):
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${GTSAM_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${GTSAM_SOURCE_DIR}/gtsam/3rdparty/Eigen")
|
||||
|
||||
add_library(gtsam_eigen3 INTERFACE)
|
||||
|
||||
target_include_directories(gtsam_eigen3 INTERFACE
|
||||
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
|
||||
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
|
||||
)
|
||||
add_library(Eigen3::Eigen ALIAS gtsam_eigen3)
|
||||
|
||||
install(TARGETS gtsam_eigen3 EXPORT GTSAM-exports PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
|
||||
|
||||
list(APPEND GTSAM_EXPORTED_TARGETS gtsam_eigen3)
|
||||
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}")
|
||||
endif()
|
||||
|
||||
# Detect Eigen version:
|
||||
|
@ -69,7 +82,7 @@ else()
|
|||
endif ()
|
||||
|
||||
if (MSVC)
|
||||
if (BUILD_SHARED_LIBS)
|
||||
if (GTSAM_SHARED_LIB)
|
||||
# mute eigen static assert to avoid errors in shared lib
|
||||
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
|
||||
endif()
|
||||
|
|
|
@ -14,21 +14,45 @@ if(GTSAM_UNSTABLE_AVAILABLE)
|
|||
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
|
||||
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
|
||||
endif()
|
||||
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
|
||||
option(GTSAM_FORCE_SHARED_LIB "Force gtsam to be a shared library, overriding BUILD_SHARED_LIBS" ON)
|
||||
option(GTSAM_FORCE_STATIC_LIB "Force gtsam to be a static library, overriding BUILD_SHARED_LIBS" OFF)
|
||||
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enabled, Rot3::EXPMAP is enforced by default." OFF)
|
||||
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
|
||||
option(GTSAM_ROT3_EXPMAP "Ignored if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
|
||||
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
|
||||
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
|
||||
option(GTSAM_ENABLE_MEMORY_SANITIZER "Enable/Disable memory sanitizer" OFF)
|
||||
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
|
||||
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
|
||||
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
|
||||
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
|
||||
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
|
||||
option(GTSAM_ALLOW_DEPRECATED_SINCE_V42 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
|
||||
option(GTSAM_ALLOW_DEPRECATED_SINCE_V43 "Allow use of methods/functions deprecated in GTSAM 4.3" ON)
|
||||
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
|
||||
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
|
||||
option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF)
|
||||
option(GTSAM_SLOW_BUT_CORRECT_EXPMAP "Use slower but correct expmap for Pose2" OFF)
|
||||
|
||||
if (GTSAM_FORCE_SHARED_LIB)
|
||||
message(STATUS "GTSAM is a shared library due to GTSAM_FORCE_SHARED_LIB")
|
||||
set(GTSAM_LIBRARY_TYPE SHARED CACHE STRING "" FORCE)
|
||||
set(GTSAM_SHARED_LIB 1 CACHE BOOL "" FORCE)
|
||||
elseif (GTSAM_FORCE_STATIC_LIB)
|
||||
message(STATUS "GTSAM is a static library due to GTSAM_FORCE_STATIC_LIB")
|
||||
set(GTSAM_LIBRARY_TYPE STATIC CACHE STRING "" FORCE)
|
||||
set(GTSAM_SHARED_LIB 0 CACHE BOOL "" FORCE)
|
||||
elseif (BUILD_SHARED_LIBS)
|
||||
message(STATUS "GTSAM is a shared library due to BUILD_SHARED_LIBS is ON")
|
||||
set(GTSAM_LIBRARY_TYPE SHARED CACHE STRING "" FORCE)
|
||||
set(GTSAM_SHARED_LIB 1 CACHE BOOL "" FORCE)
|
||||
elseif((DEFINED BUILD_SHARED_LIBS) AND (NOT BUILD_SHARED_LIBS))
|
||||
message(STATUS "GTSAM is a static library due to BUILD_SHARED_LIBS is OFF")
|
||||
set(GTSAM_LIBRARY_TYPE STATIC CACHE STRING "" FORCE)
|
||||
set(GTSAM_SHARED_LIB 0 CACHE BOOL "" FORCE)
|
||||
else()
|
||||
message(FATAL_ERROR "Please, to unambiguously select the desired library type to use to build GTSAM, set one of GTSAM_FORCE_SHARED_LIB=ON, GTSAM_FORCE_STATIC_LIB=ON, or BUILD_SHARED_LIBS={ON/OFF}")
|
||||
endif()
|
||||
|
||||
if(NOT MSVC AND NOT XCODE_VERSION)
|
||||
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
|
||||
endif()
|
||||
|
|
|
@ -14,11 +14,11 @@ endif()
|
|||
# or explicit instantiation will generate build errors.
|
||||
# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
|
||||
#
|
||||
if(MSVC AND BUILD_SHARED_LIBS)
|
||||
if(MSVC AND GTSAM_SHARED_LIB)
|
||||
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
|
||||
endif()
|
||||
|
||||
if (APPLE AND BUILD_SHARED_LIBS)
|
||||
if (APPLE AND GTSAM_SHARED_LIB)
|
||||
# Set the default install directory on macOS
|
||||
set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib")
|
||||
endif()
|
||||
|
@ -50,3 +50,10 @@ if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
|
|||
# This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h
|
||||
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS)
|
||||
endif()
|
||||
|
||||
if(GTSAM_ENABLE_MEMORY_SANITIZER)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address -fsanitize=leak -g")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fsanitize=leak -g")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address -fsanitize=leak")
|
||||
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fsanitize=address -fsanitize=leak")
|
||||
endif()
|
||||
|
|
|
@ -21,7 +21,12 @@ if(GTSAM_USE_SYSTEM_METIS)
|
|||
mark_as_advanced(METIS_LIBRARY)
|
||||
|
||||
add_library(metis-gtsam-if INTERFACE)
|
||||
target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR})
|
||||
target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR}
|
||||
# gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
|
||||
# via extern "C"
|
||||
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
|
||||
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
|
||||
)
|
||||
target_link_libraries(metis-gtsam-if INTERFACE ${METIS_LIBRARY})
|
||||
endif()
|
||||
else()
|
||||
|
@ -30,10 +35,12 @@ else()
|
|||
add_subdirectory(${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis)
|
||||
|
||||
target_include_directories(metis-gtsam BEFORE PUBLIC
|
||||
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
|
||||
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
|
||||
# gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
|
||||
# via extern "C"
|
||||
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
|
||||
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
|
||||
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
|
||||
)
|
||||
|
||||
add_library(metis-gtsam-if INTERFACE)
|
||||
|
|
|
@ -14,7 +14,7 @@ print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts
|
|||
if (DOXYGEN_FOUND)
|
||||
print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs")
|
||||
endif()
|
||||
print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries")
|
||||
print_enabled_config(${GTSAM_SHARED_LIB} "Build shared GTSAM libraries")
|
||||
print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name")
|
||||
if(GTSAM_UNSTABLE_AVAILABLE)
|
||||
print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
|
||||
|
@ -29,10 +29,13 @@ if(NOT MSVC AND NOT XCODE_VERSION)
|
|||
print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
|
||||
endif()
|
||||
|
||||
print_config("Enable Boost serialization" "${GTSAM_ENABLE_BOOST_SERIALIZATION}")
|
||||
|
||||
print_build_options_for_target(gtsam)
|
||||
|
||||
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
|
||||
print_config("Use System Metis" "${GTSAM_USE_SYSTEM_METIS}")
|
||||
print_config("Using Boost version" "${Boost_VERSION}")
|
||||
|
||||
if(GTSAM_USE_TBB)
|
||||
print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")
|
||||
|
@ -84,9 +87,10 @@ print_config("CPack Generator" "${CPACK_GENERATOR}")
|
|||
message(STATUS "GTSAM flags ")
|
||||
print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
|
||||
print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
|
||||
print_enabled_config(${GTSAM_ENABLE_MEMORY_SANITIZER} "Build with Memory Sanitizer ")
|
||||
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
|
||||
print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
|
||||
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V42} "Allow features deprecated in GTSAM 4.1")
|
||||
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V43} "Allow features deprecated in GTSAM 4.3")
|
||||
print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
|
||||
print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
|
|||
|
||||
set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR})
|
||||
set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR})
|
||||
set(Python_VERSION_PATCH ${PYTHON_VERSION_PATCH})
|
||||
set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})
|
||||
|
||||
else()
|
||||
|
@ -31,11 +32,12 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
|
|||
|
||||
set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR})
|
||||
set(Python_VERSION_MINOR ${Python3_VERSION_MINOR})
|
||||
set(Python_VERSION_PATCH ${Python3_VERSION_PATCH})
|
||||
|
||||
endif()
|
||||
|
||||
set(GTSAM_PYTHON_VERSION
|
||||
"${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}"
|
||||
"${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}.${Python_VERSION_PATCH}"
|
||||
CACHE STRING "The version of Python to build the wrappers against."
|
||||
FORCE)
|
||||
|
||||
|
|
|
@ -7,10 +7,6 @@ if (GTSAM_WITH_TBB)
|
|||
if(TBB_FOUND)
|
||||
set(GTSAM_USE_TBB 1) # This will go into config.h
|
||||
|
||||
# if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
|
||||
# message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
|
||||
# endif()
|
||||
|
||||
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
|
||||
set(TBB_GREATER_EQUAL_2020 1)
|
||||
else()
|
||||
|
@ -18,7 +14,7 @@ if (GTSAM_WITH_TBB)
|
|||
endif()
|
||||
# all definitions and link requisites will go via imported targets:
|
||||
# tbb & tbbmalloc
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES TBB::tbb TBB::tbbmalloc)
|
||||
else()
|
||||
set(GTSAM_USE_TBB 0) # This will go into config.h
|
||||
endif()
|
||||
|
|
|
@ -31,10 +31,10 @@
|
|||
|
||||
// Whether GTSAM is compiled as a static library or a DLL on Windows.
|
||||
// This will be used to decide whether to include __declspec(dllimport) or not in headers
|
||||
#cmakedefine BUILD_SHARED_LIBS
|
||||
#cmakedefine GTSAM_SHARED_LIB
|
||||
|
||||
#ifdef _WIN32
|
||||
# ifndef BUILD_SHARED_LIBS
|
||||
# ifndef GTSAM_SHARED_LIB
|
||||
# define @library_name@_EXPORT
|
||||
# define @library_name@_EXTERN_EXPORT extern
|
||||
# else
|
||||
|
@ -56,5 +56,5 @@
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#undef BUILD_SHARED_LIBS
|
||||
#undef GTSAM_SHARED_LIB
|
||||
|
||||
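For reference, the generated header boils down to the standard Windows export/import pattern; the sketch below illustrates the idea (macro and symbol names are illustrative, not copied from the template):

// Illustrative expansion only -- not the generated file. When building the
// gtsam DLL the symbols are dllexport'ed; client code sees dllimport; static
// and non-Windows builds make the macro a no-op.
#if defined(_WIN32) && defined(GTSAM_SHARED_LIB)
#  ifdef gtsam_EXPORTS                          // defined by CMake while compiling the gtsam target (assumed name)
#    define GTSAM_EXPORT __declspec(dllexport)
#  else
#    define GTSAM_EXPORT __declspec(dllimport)
#  endif
#else
#  define GTSAM_EXPORT                          // nothing to do
#endif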
|
|
|
@ -12,6 +12,6 @@ add_executable(example
|
|||
)
|
||||
|
||||
# By using CMake exported targets, a simple "link" dependency introduces the
|
||||
# include directories (-I) flags, links against Boost, and add any other
|
||||
# include directories (-I) flags, and adds any other
|
||||
# required build flags (e.g. C++11, etc.)
|
||||
target_link_libraries(example PRIVATE gtsam)
|
||||
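For context, a minimal client program that such an `example` target could compile might look like this (a hypothetical sketch; the file and its contents are not part of the diff):

// example.cpp -- hypothetical client code; "target_link_libraries(example PRIVATE gtsam)"
// pulls in include paths, Eigen, and the required compile flags via the exported target.
#include <gtsam/geometry/Pose2.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

#include <iostream>

int main() {
  gtsam::NonlinearFactorGraph graph;
  auto noise = gtsam::noiseModel::Diagonal::Sigmas(gtsam::Vector3(0.3, 0.3, 0.1));
  graph.addPrior(1, gtsam::Pose2(0.0, 0.0, 0.0), noise);  // prior on pose key 1
  std::cout << "number of factors: " << graph.size() << std::endl;
  return 0;
}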
|
|
|
@ -2,7 +2,7 @@
|
|||
# Macro for adding categorized tests in a "tests" folder, with
|
||||
# optional exclusion of tests and convenience library linking options
|
||||
#
|
||||
# By default, all tests are linked with CppUnitLite and boost
|
||||
# By default, all tests are linked with CppUnitLite
|
||||
# Arguments:
|
||||
# - subdir The name of the category for this test
|
||||
# - local_libs A list of convenience libraries to use (if GTSAM_BUILD_CONVENIENCE_LIBRARIES is true)
|
||||
|
@ -32,7 +32,6 @@ endfunction()
|
|||
# Macro for adding categorized timing scripts in a "tests" folder, with
|
||||
# optional exclusion of tests and convenience library linking options
|
||||
#
|
||||
# By default, all tests are linked with boost
|
||||
# Arguments:
|
||||
# - subdir The name of the category for this timing script
|
||||
# - local_libs A list of convenience libraries to use (if GTSAM_BUILD_CONVENIENCE_LIBRARIES is true)
|
||||
|
@ -51,8 +50,7 @@ macro(gtsam_add_subdir_timing subdir local_libs full_libs excluded_srcs)
|
|||
endmacro()
|
||||
|
||||
# Macro for adding executables matching a pattern - builds one executable for
|
||||
# each file matching the pattern. These executables are automatically linked
|
||||
# with boost.
|
||||
# each file matching the pattern.
|
||||
# Arguments:
|
||||
# - pattern The glob pattern to match source files
|
||||
# - local_libs A list of convenience libraries to use (if GTSAM_BUILD_CONVENIENCE_LIBRARIES is true)
|
||||
|
@ -138,9 +136,9 @@ macro(gtsam_add_grouped_scripts group pattern target_prefix pretty_prefix_name l
|
|||
|
||||
# Linking and dependencies
|
||||
if (GTSAM_BUILD_CONVENIENCE_LIBRARIES)
|
||||
target_link_libraries(${script_bin} ${local_libs} ${GTSAM_BOOST_LIBRARIES})
|
||||
target_link_libraries(${script_bin} ${local_libs})
|
||||
else()
|
||||
target_link_libraries(${script_bin} ${full_libs} ${GTSAM_BOOST_LIBRARIES})
|
||||
target_link_libraries(${script_bin} ${full_libs})
|
||||
endif()
|
||||
|
||||
# Add .run target
|
||||
|
|
|
@ -26,6 +26,7 @@ if (GTSAM_BUILD_DOCS)
|
|||
gtsam/basis
|
||||
gtsam/discrete
|
||||
gtsam/geometry
|
||||
gtsam/hybrid
|
||||
gtsam/inference
|
||||
gtsam/linear
|
||||
gtsam/navigation
|
||||
|
@ -33,7 +34,6 @@ if (GTSAM_BUILD_DOCS)
|
|||
gtsam/sam
|
||||
gtsam/sfm
|
||||
gtsam/slam
|
||||
gtsam/smart
|
||||
gtsam/symbolic
|
||||
gtsam
|
||||
)
|
||||
|
@ -42,10 +42,12 @@ if (GTSAM_BUILD_DOCS)
|
|||
set(gtsam_unstable_doc_subdirs
|
||||
gtsam_unstable/base
|
||||
gtsam_unstable/discrete
|
||||
gtsam_unstable/dynamics
|
||||
gtsam_unstable/geometry
|
||||
gtsam_unstable/linear
|
||||
gtsam_unstable/nonlinear
|
||||
gtsam_unstable/slam
|
||||
gtsam_unstable/dynamics
|
||||
gtsam_unstable/nonlinear
|
||||
gtsam_unstable/partition
|
||||
gtsam_unstable/slam
|
||||
gtsam_unstable
|
||||
)
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// add unary measurement factors, like GPS, on all three poses
|
||||
noiseModel::Diagonal::shared_ptr unaryNoise =
|
||||
auto unaryNoise =
|
||||
noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1)); // 10cm std on x,y
|
||||
graph.add(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise));
|
||||
graph.add(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise));
|
||||
graph.add(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise));
|
||||
graph.emplace_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise);
|
||||
graph.emplace_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise);
|
||||
graph.emplace_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise);
|
||||
|
||||
|
|
|
@ -1,13 +1,15 @@
|
|||
class UnaryFactor: public NoiseModelFactor1<Pose2> {
|
||||
double mx_, my_; ///< X and Y measurements
|
||||
|
||||
|
||||
public:
|
||||
|
||||
// Provide access to the Matrix& version of evaluateError:
|
||||
using gtsam::NoiseModelFactor1<Pose2>::evaluateError;
|
||||
|
||||
UnaryFactor(Key j, double x, double y, const SharedNoiseModel& model):
|
||||
NoiseModelFactor1<Pose2>(model, j), mx_(x), my_(y) {}
|
||||
|
||||
Vector evaluateError(const Pose2& q,
|
||||
boost::optional<Matrix&> H = boost::none) const
|
||||
{
|
||||
Vector evaluateError(const Pose2& q, OptionalMatrixType H) const override {
|
||||
const Rot2& R = q.rotation();
|
||||
if (H) (*H) = (gtsam::Matrix(2, 3) <<
|
||||
R.c(), -R.s(), 0.0,
|
||||
|
|
|
@ -3,13 +3,11 @@ NonlinearFactorGraph graph;
|
|||
|
||||
// Add a Gaussian prior on pose x_1
|
||||
Pose2 priorMean(0.0, 0.0, 0.0);
|
||||
noiseModel::Diagonal::shared_ptr priorNoise =
|
||||
noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
|
||||
graph.addPrior(1, priorMean, priorNoise);
|
||||
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
|
||||
graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));
|
||||
|
||||
// Add two odometry factors
|
||||
Pose2 odometry(2.0, 0.0, 0.0);
|
||||
noiseModel::Diagonal::shared_ptr odometryNoise =
|
||||
noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
|
||||
auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
|
||||
graph.add(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise));
|
||||
graph.add(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise));
|
||||
|
|
|
@ -1,11 +1,14 @@
|
|||
Factor Graph:
|
||||
size: 3
|
||||
factor 0: PriorFactor on 1
|
||||
prior mean: (0, 0, 0)
|
||||
|
||||
Factor 0: PriorFactor on 1
|
||||
prior mean: (0, 0, 0)
|
||||
noise model: diagonal sigmas [0.3; 0.3; 0.1];
|
||||
factor 1: BetweenFactor(1,2)
|
||||
measured: (2, 0, 0)
|
||||
noise model: diagonal sigmas [0.2; 0.2; 0.1];
|
||||
factor 2: BetweenFactor(2,3)
|
||||
measured: (2, 0, 0)
|
||||
|
||||
Factor 1: BetweenFactor(1,2)
|
||||
measured: (2, 0, 0)
|
||||
noise model: diagonal sigmas [0.2; 0.2; 0.1];
|
||||
|
||||
Factor 2: BetweenFactor(2,3)
|
||||
measured: (2, 0, 0)
|
||||
noise model: diagonal sigmas [0.2; 0.2; 0.1];
|
|
@ -1,11 +1,23 @@
|
|||
Initial Estimate:
|
||||
|
||||
Values with 3 values:
|
||||
Value 1: (0.5, 0, 0.2)
|
||||
Value 2: (2.3, 0.1, -0.2)
|
||||
Value 3: (4.1, 0.1, 0.1)
|
||||
Value 1: (gtsam::Pose2)
|
||||
(0.5, 0, 0.2)
|
||||
|
||||
Value 2: (gtsam::Pose2)
|
||||
(2.3, 0.1, -0.2)
|
||||
|
||||
Value 3: (gtsam::Pose2)
|
||||
(4.1, 0.1, 0.1)
|
||||
|
||||
Final Result:
|
||||
|
||||
Values with 3 values:
|
||||
Value 1: (-1.8e-16, 8.7e-18, -9.1e-19)
|
||||
Value 2: (2, 7.4e-18, -2.5e-18)
|
||||
Value 3: (4, -1.8e-18, -3.1e-18)
|
||||
Value 1: (gtsam::Pose2)
|
||||
(7.5e-16, -5.3e-16, -1.8e-16)
|
||||
|
||||
Value 2: (gtsam::Pose2)
|
||||
(2, -1.1e-15, -2.5e-16)
|
||||
|
||||
Value 3: (gtsam::Pose2)
|
||||
(4, -1.7e-15, -2.5e-16)
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
x1 covariance:
|
||||
0.09 1.1e-47 5.7e-33
|
||||
1.1e-47 0.09 1.9e-17
|
||||
5.7e-33 1.9e-17 0.01
|
||||
0.09 1.7e-33 2.8e-33
|
||||
1.7e-33 0.09 2.6e-17
|
||||
2.8e-33 2.6e-17 0.01
|
||||
x2 covariance:
|
||||
0.13 4.7e-18 2.4e-18
|
||||
4.7e-18 0.17 0.02
|
||||
2.4e-18 0.02 0.02
|
||||
0.13 1.2e-18 6.1e-19
|
||||
1.2e-18 0.17 0.02
|
||||
6.1e-19 0.02 0.02
|
||||
x3 covariance:
|
||||
0.17 2.7e-17 8.4e-18
|
||||
2.7e-17 0.37 0.06
|
||||
8.4e-18 0.06 0.03
|
||||
0.17 8.6e-18 2.7e-18
|
||||
8.6e-18 0.37 0.06
|
||||
2.7e-18 0.06 0.03
|
|
@ -5,6 +5,7 @@
|
|||
\textclass article
|
||||
\begin_preamble
|
||||
\usepackage{color}
|
||||
\usepackage{listings}
|
||||
|
||||
\definecolor{mygreen}{rgb}{0,0.6,0}
|
||||
\definecolor{mygray}{rgb}{0.5,0.5,0.5}
|
||||
|
@ -271,7 +272,7 @@ color{red}{// Make 'private' any typedefs that must be redefined in derived
|
|||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< Shared pointer to
|
||||
typedef std::shared_ptr<This> shared_ptr; ///< Shared pointer to
|
||||
this
|
||||
\end_layout
|
||||
|
||||
|
@ -304,7 +305,7 @@ color{red}{// Make 'public' the typedefs that will be valid in the derived
|
|||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
typedef boost::shared_ptr<FACTOR> sharedFactor; ///< Shared pointer
|
||||
typedef std::shared_ptr<FACTOR> sharedFactor; ///< Shared pointer
|
||||
to a factor
|
||||
\end_layout
|
||||
|
||||
|
|
2973
doc/Doxyfile.in
File diff suppressed because it is too large
|
@ -18,7 +18,6 @@
|
|||
<tab type="files" visible="yes" title="" intro=""/>
|
||||
<tab type="globals" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="dirs" visible="yes" title="" intro=""/>
|
||||
<tab type="examples" visible="yes" title="" intro=""/>
|
||||
</navindex>
|
||||
|
||||
|
|
|
@ -0,0 +1,719 @@
|
|||
#LyX 2.3 created this file. For more info see http://www.lyx.org/
|
||||
\lyxformat 544
|
||||
\begin_document
|
||||
\begin_header
|
||||
\save_transient_properties true
|
||||
\origin unavailable
|
||||
\textclass article
|
||||
\use_default_options true
|
||||
\maintain_unincluded_children false
|
||||
\language english
|
||||
\language_package default
|
||||
\inputencoding auto
|
||||
\fontencoding global
|
||||
\font_roman "default" "default"
|
||||
\font_sans "default" "default"
|
||||
\font_typewriter "default" "default"
|
||||
\font_math "auto" "auto"
|
||||
\font_default_family default
|
||||
\use_non_tex_fonts false
|
||||
\font_sc false
|
||||
\font_osf false
|
||||
\font_sf_scale 100 100
|
||||
\font_tt_scale 100 100
|
||||
\use_microtype false
|
||||
\use_dash_ligatures true
|
||||
\graphics default
|
||||
\default_output_format default
|
||||
\output_sync 0
|
||||
\bibtex_command default
|
||||
\index_command default
|
||||
\paperfontsize 11
|
||||
\spacing single
|
||||
\use_hyperref false
|
||||
\papersize default
|
||||
\use_geometry true
|
||||
\use_package amsmath 1
|
||||
\use_package amssymb 1
|
||||
\use_package cancel 1
|
||||
\use_package esint 1
|
||||
\use_package mathdots 1
|
||||
\use_package mathtools 1
|
||||
\use_package mhchem 1
|
||||
\use_package stackrel 1
|
||||
\use_package stmaryrd 1
|
||||
\use_package undertilde 1
|
||||
\cite_engine basic
|
||||
\cite_engine_type default
|
||||
\biblio_style plain
|
||||
\use_bibtopic false
|
||||
\use_indices false
|
||||
\paperorientation portrait
|
||||
\suppress_date false
|
||||
\justification true
|
||||
\use_refstyle 1
|
||||
\use_minted 0
|
||||
\index Index
|
||||
\shortcut idx
|
||||
\color #008000
|
||||
\end_index
|
||||
\leftmargin 1in
|
||||
\topmargin 1in
|
||||
\rightmargin 1in
|
||||
\bottommargin 1in
|
||||
\secnumdepth 3
|
||||
\tocdepth 3
|
||||
\paragraph_separation indent
|
||||
\paragraph_indentation default
|
||||
\is_math_indent 0
|
||||
\math_numbering_side default
|
||||
\quotes_style english
|
||||
\dynamic_quotes 0
|
||||
\papercolumns 1
|
||||
\papersides 1
|
||||
\paperpagestyle default
|
||||
\tracking_changes false
|
||||
\output_changes false
|
||||
\html_math_output 0
|
||||
\html_css_as_file 0
|
||||
\html_be_strict false
|
||||
\end_header
|
||||
|
||||
\begin_body
|
||||
|
||||
\begin_layout Title
|
||||
Hybrid Inference
|
||||
\end_layout
|
||||
|
||||
\begin_layout Author
|
||||
Frank Dellaert & Varun Agrawal
|
||||
\end_layout
|
||||
|
||||
\begin_layout Date
|
||||
January 2023
|
||||
\end_layout
|
||||
|
||||
\begin_layout Section
|
||||
Hybrid Conditionals
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Here we develop a hybrid conditional density, on continuous variables (typically
|
||||
a measurement
|
||||
\begin_inset Formula $x$
|
||||
\end_inset
|
||||
|
||||
), given a mix of continuous variables
|
||||
\begin_inset Formula $y$
|
||||
\end_inset
|
||||
|
||||
and discrete variables
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
We start by reviewing a Gaussian conditional density and its invariants
|
||||
(relationship between density, error, and normalization constant), and
|
||||
then work out what needs to happen for a hybrid version.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Subsubsection*
|
||||
GaussianConditional
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
A
|
||||
\emph on
|
||||
GaussianConditional
|
||||
\emph default
|
||||
is a properly normalized, multivariate Gaussian conditional density:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
P(x|y)=\frac{1}{\sqrt{|2\pi\Sigma|}}\exp\left\{ -\frac{1}{2}\|Rx+Sy-d\|_{\Sigma}^{2}\right\}
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
where
|
||||
\begin_inset Formula $R$
|
||||
\end_inset
|
||||
|
||||
is square and upper-triangular.
|
||||
For every
|
||||
\emph on
|
||||
GaussianConditional
|
||||
\emph default
|
||||
, we have the following
|
||||
\series bold
|
||||
invariant
|
||||
\series default
|
||||
,
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
\log P(x|y)=K_{gc}-E_{gc}(x,y),\label{eq:gc_invariant}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
with the
|
||||
\series bold
|
||||
log-normalization constant
|
||||
\series default
|
||||
|
||||
\begin_inset Formula $K_{gc}$
|
||||
\end_inset
|
||||
|
||||
equal to
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
K_{gc}=\log\frac{1}{\sqrt{|2\pi\Sigma|}}\label{eq:log_constant}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
and the
|
||||
\series bold
|
||||
error
|
||||
\series default
|
||||
|
||||
\begin_inset Formula $E_{gc}(x,y)$
|
||||
\end_inset
|
||||
|
||||
equal to the negative log-density, up to a constant:
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
E_{gc}(x,y)=\frac{1}{2}\|Rx+Sy-d\|_{\Sigma}^{2}.\label{eq:gc_error}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
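As a quick sanity check (an illustrative scalar example added here, not part of the LyX source), take a single continuous variable with $R=1$, $S=-a$, $d=b$ and noise variance $\sigma^{2}$:
\[
P(x|y)=\frac{1}{\sqrt{2\pi\sigma^{2}}}\exp\left\{-\frac{(x-ay-b)^{2}}{2\sigma^{2}}\right\},\qquad
K_{gc}=-\frac{1}{2}\log(2\pi\sigma^{2}),\qquad
E_{gc}(x,y)=\frac{(x-ay-b)^{2}}{2\sigma^{2}},
\]
so that $\log P(x|y)=K_{gc}-E_{gc}(x,y)$, as required by the invariant \eqref{eq:gc_invariant}.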
\begin_layout Subsubsection*
|
||||
GaussianMixture
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
A
|
||||
\emph on
|
||||
GaussianMixture
|
||||
\emph default
|
||||
(maybe to be renamed to
|
||||
\emph on
|
||||
GaussianMixtureComponent
|
||||
\emph default
|
||||
) just indexes into a number of
|
||||
\emph on
|
||||
GaussianConditional
|
||||
\emph default
|
||||
instances, that are each properly normalized:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
P(x|y,m)=P_{m}(x|y).
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
We store one
|
||||
\emph on
|
||||
GaussianConditional
|
||||
\emph default
|
||||
|
||||
\begin_inset Formula $P_{m}(x|y)$
|
||||
\end_inset
|
||||
|
||||
for every possible assignment
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
to a set of discrete variables.
|
||||
As
|
||||
\emph on
|
||||
GaussianMixture
|
||||
\emph default
|
||||
is a
|
||||
\emph on
|
||||
Conditional
|
||||
\emph default
|
||||
, it needs to satisfy a similar invariant to
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gc_invariant"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
\log P(x|y,m)=K_{gm}-E_{gm}(x,y,m).\label{eq:gm_invariant}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
If we take the log of
|
||||
\begin_inset Formula $P(x|y,m)$
|
||||
\end_inset
|
||||
|
||||
we get
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
\log P(x|y,m)=\log P_{m}(x|y)=K_{gcm}-E_{gcm}(x,y).\label{eq:gm_log}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
Equating
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gm_invariant"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
and
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gm_log"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
we see that this can be achieved by defining the error
|
||||
\begin_inset Formula $E_{gm}(x,y,m)$
|
||||
\end_inset
|
||||
|
||||
as
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
E_{gm}(x,y,m)=E_{gcm}(x,y)+K_{gm}-K_{gcm}\label{eq:gm_error}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
where we choose
|
||||
\begin_inset Formula $K_{gm}=\max K_{gcm}$
|
||||
\end_inset
|
||||
|
||||
, as then the error will always be positive.
|
||||
\end_layout
|
||||
|
||||
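For concreteness (an assumed two-component scalar example, not in the original LyX source), suppose the mixture has components with variances $\sigma_{0}^{2}\le\sigma_{1}^{2}$. Then
\[
K_{gcm}=-\frac{1}{2}\log(2\pi\sigma_{m}^{2}),\qquad
K_{gm}=\max_{m}K_{gcm}=-\frac{1}{2}\log(2\pi\sigma_{0}^{2}),
\]
and the hybrid error becomes $E_{gm}(x,y,m)=E_{gcm}(x,y)+\frac{1}{2}\log(\sigma_{m}^{2}/\sigma_{0}^{2})$: the wider component pays a nonnegative offset, so the error is indeed always nonnegative.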
\begin_layout Section
|
||||
Hybrid Factors
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
In GTSAM, we typically condition on known measurements, and factors encode
|
||||
the resulting negative log-likelihood of the unknown variables
|
||||
\begin_inset Formula $y$
|
||||
\end_inset
|
||||
|
||||
given the measurements
|
||||
\begin_inset Formula $x$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
We review how a Gaussian conditional density is converted into a Gaussian
|
||||
factor, and then develop a hybrid version satisfying the correct invariants
|
||||
as well.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Subsubsection*
|
||||
JacobianFactor
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
A
|
||||
\emph on
|
||||
JacobianFactor
|
||||
\emph default
|
||||
typically results from a
|
||||
\emph on
|
||||
GaussianConditional
|
||||
\emph default
|
||||
by having known values
|
||||
\begin_inset Formula $\bar{x}$
|
||||
\end_inset
|
||||
|
||||
for the
|
||||
\begin_inset Quotes eld
|
||||
\end_inset
|
||||
|
||||
measurement
|
||||
\begin_inset Quotes erd
|
||||
\end_inset
|
||||
|
||||
|
||||
\begin_inset Formula $x$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
L(y)\propto P(\bar{x}|y)\label{eq:likelihood}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
In GTSAM factors represent the negative log-likelihood
|
||||
\begin_inset Formula $E_{jf}(y)$
|
||||
\end_inset
|
||||
|
||||
and hence we have
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{jf}(y)=-\log L(y)=C-\log P(\bar{x}|y),
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
with
|
||||
\begin_inset Formula $C$
|
||||
\end_inset
|
||||
|
||||
the log of the proportionality constant in
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:likelihood"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
.
|
||||
Substituting in
|
||||
\begin_inset Formula $\log P(\bar{x}|y)$
|
||||
\end_inset
|
||||
|
||||
from the invariant
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gc_invariant"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
we obtain
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{jf}(y)=C-K_{gc}+E_{gc}(\bar{x},y).
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
The
|
||||
\emph on
|
||||
likelihood
|
||||
\emph default
|
||||
function in
|
||||
\emph on
|
||||
GaussianConditional
|
||||
\emph default
|
||||
chooses
|
||||
\begin_inset Formula $C=K_{gc}$
|
||||
\end_inset
|
||||
|
||||
, and the
|
||||
\emph on
|
||||
JacobianFactor
|
||||
\emph default
|
||||
does not store any constant; it just implements:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{jf}(y)=E_{gc}(\bar{x},y)=\frac{1}{2}\|R\bar{x}+Sy-d\|_{\Sigma}^{2}=\frac{1}{2}\|Ay-b\|_{\Sigma}^{2}
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
with
|
||||
\begin_inset Formula $A=S$
|
||||
\end_inset
|
||||
|
||||
and
|
||||
\begin_inset Formula $b=d-R\bar{x}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
\end_layout
|
||||
|
||||
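In code, this conversion corresponds to calling the likelihood function mentioned above on a GaussianConditional with the known value $\bar{x}$. A minimal sketch (key numbering, dimensions, and the main wrapper are illustrative assumptions):

// Minimal sketch: turn P(x|y) into the likelihood factor L(y) proportional to P(xbar|y).
// Assumes a recent GTSAM where GaussianConditional::likelihood() is available.
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/VectorValues.h>

using namespace gtsam;

int main() {
  // Scalar P(x|y) with R = [1], S = [-0.5], d = [1] and unit noise.
  const Key x = 1, y = 2;
  const Matrix R = (Matrix(1, 1) << 1.0).finished();
  const Matrix S = (Matrix(1, 1) << -0.5).finished();
  const Vector d = (Vector(1) << 1.0).finished();
  GaussianConditional conditional(x, d, R, y, S);

  // Fix the "measurement" xbar = 2 and obtain a factor on y alone.
  VectorValues xbar;
  xbar.insert(x, (Vector(1) << 2.0).finished());
  auto factor = conditional.likelihood(xbar);  // JacobianFactor with A = S, b = d - R*xbar
  factor->print("L(y):\n");
  return 0;
}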
\begin_layout Subsubsection*
|
||||
GaussianMixtureFactor
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Analogously, a
|
||||
\emph on
|
||||
GaussianMixtureFactor
|
||||
\emph default
|
||||
typically results from a GaussianMixture by having known values
|
||||
\begin_inset Formula $\bar{x}$
|
||||
\end_inset
|
||||
|
||||
for the
|
||||
\begin_inset Quotes eld
|
||||
\end_inset
|
||||
|
||||
measurement
|
||||
\begin_inset Quotes erd
|
||||
\end_inset
|
||||
|
||||
|
||||
\begin_inset Formula $x$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
L(y,m)\propto P(\bar{x}|y,m).
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
We will similarly implement the negative log-likelihood
|
||||
\begin_inset Formula $E_{mf}(y,m)$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{mf}(y,m)=-\log L(y,m)=C-\log P(\bar{x}|y,m).
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
Since we know the log-density from the invariant
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gm_invariant"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
, we obtain
|
||||
\begin_inset Formula
|
||||
\[
|
||||
\log P(\bar{x}|y,m)=K_{gm}-E_{gm}(\bar{x},y,m),
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
and hence
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{mf}(y,m)=C+E_{gm}(\bar{x},y,m)-K_{gm}.
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
Substituting in
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gm_error"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
we finally have an expression where
|
||||
\begin_inset Formula $K_{gm}$
|
||||
\end_inset
|
||||
|
||||
canceled out, but we have a dependence on the individual component constants
|
||||
|
||||
\begin_inset Formula $K_{gcm}$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{mf}(y,m)=C+E_{gcm}(\bar{x},y)-K_{gcm}.
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
Unfortunately, we can no longer choose
|
||||
\begin_inset Formula $C$
|
||||
\end_inset
|
||||
|
||||
independently from
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
to make the constant disappear.
|
||||
There are two possibilities:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Enumerate
|
||||
Implement likelihood to yield both a hybrid factor
|
||||
\emph on
|
||||
and
|
||||
\emph default
|
||||
a discrete factor.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Enumerate
|
||||
Hide the constant inside the collection of JacobianFactor instances, which
|
||||
is the possibility we implement.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
In either case, we implement the mixture factor
|
||||
\begin_inset Formula $E_{mf}(y,m)$
|
||||
\end_inset
|
||||
|
||||
as a set of
|
||||
\emph on
|
||||
JacobianFactor
|
||||
\emph default
|
||||
instances
|
||||
\begin_inset Formula $E_{jfm}(y)$
|
||||
\end_inset
|
||||
|
||||
, indexed by the discrete assignment
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{mf}(y,m)=E_{jfm}(y)=\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}.
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
In GTSAM, we define
|
||||
\begin_inset Formula $A_{m}$
|
||||
\end_inset
|
||||
|
||||
and
|
||||
\begin_inset Formula $b_{m}$
|
||||
\end_inset
|
||||
|
||||
strategically to make the
|
||||
\emph on
|
||||
JacobianFactor
|
||||
\emph default
|
||||
compute the constant, as well:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}=C+E_{gcm}(\bar{x},y)-K_{gcm}.
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
Substituting in the definition
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gc_error"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
for
|
||||
\begin_inset Formula $E_{gcm}(\bar{x},y)$
|
||||
\end_inset
|
||||
|
||||
we need
|
||||
\begin_inset Formula
|
||||
\[
|
||||
\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}=C+\frac{1}{2}\|R_{m}\bar{x}+S_{m}y-d_{m}\|_{\Sigma_{m}}^{2}-K_{gcm}
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
which can be achieved by setting
|
||||
\begin_inset Formula
|
||||
\[
|
||||
A_{m}=\left[\begin{array}{c}
|
||||
S_{m}\\
|
||||
0
|
||||
\end{array}\right],~b_{m}=\left[\begin{array}{c}
|
||||
d_{m}-R_{m}\bar{x}\\
|
||||
c_{m}
|
||||
\end{array}\right],~\Sigma_{mfm}=\left[\begin{array}{cc}
|
||||
\Sigma_{m}\\
|
||||
& 1
|
||||
\end{array}\right]
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
and setting the mode-dependent scalar
|
||||
\begin_inset Formula $c_{m}$
|
||||
\end_inset
|
||||
|
||||
such that
|
||||
\begin_inset Formula $\frac{1}{2}c_{m}^{2}=C-K_{gcm}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
This can be achieved by
|
||||
\begin_inset Formula $C=\max K_{gcm}=K_{gm}$
|
||||
\end_inset
|
||||
|
||||
and
|
||||
\begin_inset Formula $c_{m}=\sqrt{2(C-K_{gcm})}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
Note that in the case where all constants
|
||||
\begin_inset Formula $K_{gcm}$
|
||||
\end_inset
|
||||
|
||||
are equal, we can just use
|
||||
\begin_inset Formula $C=K_{gm}$
|
||||
\end_inset
|
||||
|
||||
and
|
||||
\begin_inset Formula
|
||||
\[
|
||||
A_{m}=S_{m},~b_{m}=d_{m}-R_{m}\bar{x},~\Sigma_{mfm}=\Sigma_{m}
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
as before.
|
||||
\end_layout
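A minimal C++ sketch of this padding trick follows, assuming each component's S_m, the vector d_m - R_m*xBar, the diagonal sigmas of Sigma_m, and the constants C and K_gcm are given. The helper name is illustrative and this is not GTSAM's actual GaussianMixtureFactor code.

#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/NoiseModel.h>
#include <cmath>

// Pad one component's Jacobian system with an extra row so that the factor
// also accounts for the mode-dependent constant: the extra residual c_m with
// unit sigma contributes 0.5*c_m^2 = C - K_gcm to the error.
gtsam::JacobianFactor paddedComponent(gtsam::Key yKey,
                                      const gtsam::Matrix& S,
                                      const gtsam::Vector& bOrig,   // d_m - R_m * xBar
                                      const gtsam::Vector& sigmas,  // sigmas of Sigma_m
                                      double C, double Kgcm) {
  const double cm = std::sqrt(2.0 * (C - Kgcm));
  gtsam::Matrix A(S.rows() + 1, S.cols());
  A << S, gtsam::Matrix::Zero(1, S.cols());   // zero row: constant does not depend on y
  gtsam::Vector b(bOrig.size() + 1);
  b << bOrig, cm;                             // append the mode-dependent scalar
  gtsam::Vector paddedSigmas(sigmas.size() + 1);
  paddedSigmas << sigmas, 1.0;                // unit sigma for the extra row
  return gtsam::JacobianFactor(yKey, A, b,
                               gtsam::noiseModel::Diagonal::Sigmas(paddedSigmas));
}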
|
||||
|
||||
\begin_layout Standard
|
||||
In summary, we have
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
E_{mf}(y,m)=\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}=E_{gcm}(\bar{x},y)+K_{gm}-K_{gcm}.\label{eq:mf_invariant}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
which is identical to the GaussianMixture error
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gm_error"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
.
|
||||
\end_layout
|
||||
|
||||
\end_body
|
||||
\end_document
|
Binary file not shown.
File diff suppressed because it is too large
Binary file not shown.
Binary file not shown.
254
doc/gtsam.lyx
|
@ -134,14 +134,10 @@ A Hands-on Introduction
|
|||
|
||||
\begin_layout Author
|
||||
Frank Dellaert
|
||||
\begin_inset Newline newline
|
||||
\end_inset
|
||||
|
||||
Technical Report number GT-RIM-CP&R-2014-XXX
|
||||
\end_layout
|
||||
|
||||
\begin_layout Date
|
||||
September 2014
|
||||
Updated Last March 2022
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
@ -154,18 +150,14 @@ filename "common_macros.tex"
|
|||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Section*
|
||||
Overview
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
In this document I provide a hands-on introduction to both factor graphs
|
||||
and GTSAM.
|
||||
This is an updated version from the 2012 TR that is tailored to our GTSAM
|
||||
3.0 library and beyond.
|
||||
4.0 library and beyond.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
|
||||
\series bold
|
||||
Factor graphs
|
||||
|
@ -206,7 +198,7 @@ ts or prior knowledge.
|
|||
robotics and vision.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
The GTSAM toolbox (GTSAM stands for
|
||||
\begin_inset Quotes eld
|
||||
\end_inset
|
||||
|
@ -221,11 +213,13 @@ Georgia Tech Smoothing and Mapping
|
|||
It provides state of the art solutions to the SLAM and SFM problems, but
|
||||
can also be used to model and solve both simpler and more complex estimation
|
||||
problems.
|
||||
It also provides a MATLAB interface which allows for rapid prototype developmen
|
||||
t, visualization, and user interaction.
|
||||
It also provides MATLAB and Python wrappers which allow for rapid prototype
|
||||
development, visualization, and user interaction.
|
||||
In addition, it is easy to use in Jupyter notebooks and/or Google's coLaborator
|
||||
y.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
GTSAM exploits sparsity to be computationally efficient.
|
||||
Typically measurements only provide information on the relationship between
|
||||
a handful of variables, and hence the resulting factor graph will be sparsely
|
||||
|
@ -236,14 +230,17 @@ l complexity.
|
|||
GTSAM provides iterative methods that are quite efficient regardless.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
You can download the latest version of GTSAM at
|
||||
\begin_layout Abstract
|
||||
You can download the latest version of GTSAM from GitHub at
|
||||
\end_layout
|
||||
|
||||
\begin_layout Abstract
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
http://tinyurl.com/gtsam
|
||||
https://github.com/borglab/gtsam
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
@ -741,7 +738,7 @@ noindent
|
|||
\begin_inset Formula $f_{0}(x_{1})$
|
||||
\end_inset
|
||||
|
||||
on lines 5-8 as an instance of
|
||||
on lines 5-7 as an instance of
|
||||
\series bold
|
||||
\emph on
|
||||
PriorFactor<T>
|
||||
|
@ -773,7 +770,7 @@ Pose2,
|
|||
noiseModel::Diagonal
|
||||
\series default
|
||||
\emph default
|
||||
by specifying three standard deviations in line 7, respectively 30 cm.
|
||||
by specifying three standard deviations in line 6, respectively 30 cm.
|
||||
\begin_inset space ~
|
||||
\end_inset
|
||||
|
||||
|
@ -795,7 +792,7 @@ Similarly, odometry measurements are specified as
|
|||
Pose2
|
||||
\series default
|
||||
\emph default
|
||||
on line 11, with a slightly different noise model defined on line 12-13.
|
||||
on line 10, with a slightly different noise model defined on line 11.
|
||||
We then add the two factors
|
||||
\begin_inset Formula $f_{1}(x_{1},x_{2};o_{1})$
|
||||
\end_inset
|
||||
|
@ -804,7 +801,7 @@ Pose2
|
|||
\begin_inset Formula $f_{2}(x_{2},x_{3};o_{2})$
|
||||
\end_inset
|
||||
|
||||
on lines 14-15, as instances of yet another templated class,
|
||||
on lines 12-13, as instances of yet another templated class,
|
||||
\series bold
|
||||
\emph on
|
||||
BetweenFactor<T>
|
||||
|
@ -875,7 +872,7 @@ smoothing and mapping
|
|||
|
||||
.
|
||||
Later in this document we will talk about how we can also use GTSAM to
|
||||
do filtering (which you often do
|
||||
do filtering (which often you do
|
||||
\emph on
|
||||
not
|
||||
\emph default
|
||||
|
@ -928,7 +925,11 @@ Values
|
|||
\begin_layout Standard
|
||||
The latter point is often a point of confusion with beginning users of GTSAM.
|
||||
It helps to remember that when designing GTSAM we took a functional approach
|
||||
of classes corresponding to mathematical objects, which are usually immutable.
|
||||
of classes corresponding to mathematical objects, which are usually
|
||||
\emph on
|
||||
immutable
|
||||
\emph default
|
||||
.
|
||||
You should think of a factor graph as a
|
||||
\emph on
|
||||
function
|
||||
|
@ -1027,7 +1028,7 @@ NonlinearFactorGraph
|
|||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
The relevant output from running the example is as follows:
|
||||
The relevant output from running the example is as follows:
|
||||
\family typewriter
|
||||
\size small
|
||||
|
||||
|
@ -1363,14 +1364,18 @@ where
|
|||
\end_inset
|
||||
|
||||
is the measurement,
|
||||
\begin_inset Formula $q$
|
||||
\begin_inset Formula $q\in SE(2)$
|
||||
\end_inset
|
||||
|
||||
is the unknown variable,
|
||||
\begin_inset Formula $h(q)$
|
||||
\end_inset
|
||||
|
||||
is a (possibly nonlinear) measurement function, and
|
||||
is a
|
||||
\series bold
|
||||
measurement function
|
||||
\series default
|
||||
, and
|
||||
\begin_inset Formula $\Sigma$
|
||||
\end_inset
|
||||
|
||||
|
@ -1546,7 +1551,7 @@ E(q)\define h(q)-m
|
|||
|
||||
\end_inset
|
||||
|
||||
which is done on line 12.
|
||||
which is done on line 14.
|
||||
Importantly, because we want to use this factor for nonlinear optimization
|
||||
(see e.g.,
|
||||
\begin_inset CommandInset citation
|
||||
|
@ -1599,11 +1604,11 @@ q_{y}
|
|||
\begin_inset Formula $q=\left(q_{x},q_{y},q_{\theta}\right)$
|
||||
\end_inset
|
||||
|
||||
, yields the following simple
|
||||
, yields the following
|
||||
\begin_inset Formula $2\times3$
|
||||
\end_inset
|
||||
|
||||
matrix in tangent space which is the same the as the rotation matrix:
|
||||
matrix:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
@ -1618,6 +1623,171 @@ H=\left[\begin{array}{ccc}
|
|||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Paragraph*
|
||||
Important Note
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Many of our users, when attempting to create a custom factor, are initially
|
||||
surprised at the Jacobian matrix not agreeing with their intuition.
|
||||
For example, above you might simply expect a
|
||||
\begin_inset Formula $2\times3$
|
||||
\end_inset
|
||||
|
||||
identity matrix.
|
||||
This
|
||||
\emph on
|
||||
would
|
||||
\emph default
|
||||
be true for variables belonging to a vector space.
|
||||
However, in GTSAM we define the Jacobian more generally to be the matrix
|
||||
|
||||
\begin_inset Formula $H$
|
||||
\end_inset
|
||||
|
||||
such that
|
||||
\begin_inset Formula
|
||||
\[
|
||||
h(q\exp\hat{\xi})\approx h(q)+H\xi
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
where
|
||||
\begin_inset Formula $\xi=(\delta x,\delta y,\delta\theta)$
|
||||
\end_inset
|
||||
|
||||
is an incremental update and
|
||||
\begin_inset Formula $\exp\hat{\xi}$
|
||||
\end_inset
|
||||
|
||||
is the
|
||||
\series bold
|
||||
exponential map
|
||||
\series default
|
||||
for the variable we want to update.
|
||||
In this case
|
||||
\begin_inset Formula $q\in SE(2)$
|
||||
\end_inset
|
||||
|
||||
, where
|
||||
\begin_inset Formula $SE(2)$
|
||||
\end_inset
|
||||
|
||||
is the group of 2D rigid transforms, implemented by
|
||||
\series bold
|
||||
\emph on
|
||||
Pose2
|
||||
\emph default
|
||||
.
|
||||
|
||||
\series default
|
||||
The exponential map for
|
||||
\begin_inset Formula $SE(2)$
|
||||
\end_inset
|
||||
|
||||
can be approximated to first order as
|
||||
\begin_inset Formula
|
||||
\[
|
||||
\exp\hat{\xi}\approx\left[\begin{array}{ccc}
|
||||
1 & -\delta\theta & \delta x\\
|
||||
\delta\theta & 1 & \delta y\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
when using the
|
||||
\begin_inset Formula $3\times3$
|
||||
\end_inset
|
||||
|
||||
matrix representation for 2D poses, and hence
|
||||
\begin_inset Formula
|
||||
\[
|
||||
h(qe^{\hat{\xi}})\approx h\left(\left[\begin{array}{ccc}
|
||||
\cos(q_{\theta}) & -\sin(q_{\theta}) & q_{x}\\
|
||||
\sin(q_{\theta}) & \cos(q_{\theta}) & q_{y}\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]\left[\begin{array}{ccc}
|
||||
1 & -\delta\theta & \delta x\\
|
||||
\delta\theta & 1 & \delta y\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]\right)=\left[\begin{array}{c}
|
||||
q_{x}+\cos(q_{\theta})\delta x-\sin(q_{\theta})\delta y\\
|
||||
q_{y}+\sin(q_{\theta})\delta x+\cos(q_{\theta})\delta y
|
||||
\end{array}\right]
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
which then explains the Jacobian
|
||||
\begin_inset Formula $H$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
\end_layout
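The relation h(q exp(xi^)) ~ h(q) + H*xi discussed in this note can be verified numerically with a few lines of C++. This is a stand-alone illustration added here, not part of the tutorial's example code.

#include <gtsam/geometry/Pose2.h>
#include <cmath>
#include <iostream>

int main() {
  const gtsam::Pose2 q(1.0, 2.0, 0.3);
  // h(q) simply reads off the translation, as in the GPS-like UnaryFactor.
  const gtsam::Vector2 hq(q.x(), q.y());
  // The 2x3 Jacobian derived in the text: left 2x2 block is the rotation matrix.
  gtsam::Matrix23 H;
  H << std::cos(q.theta()), -std::sin(q.theta()), 0.0,
       std::sin(q.theta()),  std::cos(q.theta()), 0.0;
  // Perturb q along a small tangent vector xi and compare with the linearization.
  const gtsam::Vector3 xi(1e-5, -2e-5, 3e-5);
  const gtsam::Pose2 qp = q * gtsam::Pose2::Expmap(xi);
  const gtsam::Vector2 hqp(qp.x(), qp.y());
  std::cout << "linearization error: " << (hqp - (hq + H * xi)).norm() << std::endl;
  return 0;
}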
|
||||
|
||||
\begin_layout Standard
|
||||
Lie groups are very relevant in the robotics context, and you can read more
|
||||
here:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://github.com/borglab/gtsam/blob/develop/doc/LieGroups.pdf
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://github.com/borglab/gtsam/blob/develop/doc/math.pdf
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
In some cases you want to go even beyond Lie groups to a looser concept,
|
||||
|
||||
\series bold
|
||||
manifolds
|
||||
\series default
|
||||
, because not all unknown variables behave like a group, e.g., the space of
|
||||
3D planes, 2D lines, directions in space, etc.
|
||||
For manifolds we do not always have an exponential map, but we have a retractio
|
||||
n that plays the same role.
|
||||
Some of this is explained here, and a small code sketch follows these links:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://gtsam.org/notes/GTSAM-Concepts.html
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
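As a small illustration of the retraction interface referred to above, the sketch below uses Unit3 (directions in space) as an example manifold. It assumes the usual GTSAM retract/localCoordinates member functions; the values are arbitrary.

#include <gtsam/geometry/Unit3.h>
#include <iostream>

int main() {
  const gtsam::Unit3 p(1.0, 0.0, 0.0);      // a direction on the unit sphere
  const gtsam::Vector2 delta(0.1, -0.05);   // update in the 2-dimensional tangent space
  const gtsam::Unit3 q = p.retract(delta);  // "move" on the manifold
  // localCoordinates is the (approximate) inverse: it recovers delta.
  std::cout << p.localCoordinates(q).transpose() << std::endl;
  return 0;
}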
|
||||
|
||||
\begin_layout Subsection
|
||||
|
@ -1680,13 +1850,13 @@ UnaryFactor
|
|||
\series default
|
||||
\emph default
|
||||
instances, and add them to graph.
|
||||
GTSAM uses shared pointers to refer to factors in factor graphs, and
|
||||
GTSAM uses shared pointers to refer to factors, and
|
||||
\series bold
|
||||
\emph on
|
||||
boost::make_shared
|
||||
emplace_shared
|
||||
\series default
|
||||
\emph default
|
||||
is a convenience function to simultaneously construct a class and create
|
||||
is a convenience method to simultaneously construct a class and create
|
||||
a
|
||||
\series bold
|
||||
\emph on
|
||||
|
@ -1694,22 +1864,6 @@ shared_ptr
|
|||
\series default
|
||||
\emph default
|
||||
to it.
|
||||
|
||||
\begin_inset Note Note
|
||||
status collapsed
|
||||
|
||||
\begin_layout Plain Layout
|
||||
and on lines 4-6 we add three newly created
|
||||
\series bold
|
||||
\emph on
|
||||
UnaryFactor
|
||||
\series default
|
||||
\emph default
|
||||
instances to the graph.
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
We obtain the factor graph from Figure
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand vref
|
||||
|
|
BIN
doc/gtsam.pdf
Binary file not shown.
88
doc/refs.bib
|
@ -1,26 +1,72 @@
|
|||
%% This BibTeX bibliography file was created using BibDesk.
|
||||
%% https://bibdesk.sourceforge.io/
|
||||
|
||||
%% Created for Varun Agrawal at 2021-09-27 17:39:09 -0400
|
||||
|
||||
|
||||
%% Saved with string encoding Unicode (UTF-8)
|
||||
|
||||
|
||||
|
||||
@article{Lupton12tro,
|
||||
author = {Lupton, Todd and Sukkarieh, Salah},
|
||||
date-added = {2021-09-27 17:38:56 -0400},
|
||||
date-modified = {2021-09-27 17:39:09 -0400},
|
||||
doi = {10.1109/TRO.2011.2170332},
|
||||
journal = {IEEE Transactions on Robotics},
|
||||
number = {1},
|
||||
pages = {61-76},
|
||||
title = {Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built Environments Without Initial Conditions},
|
||||
volume = {28},
|
||||
year = {2012},
|
||||
Bdsk-Url-1 = {https://doi.org/10.1109/TRO.2011.2170332}}
|
||||
|
||||
@inproceedings{Forster15rss,
|
||||
author = {Christian Forster and Luca Carlone and Frank Dellaert and Davide Scaramuzza},
|
||||
booktitle = {Robotics: Science and Systems},
|
||||
date-added = {2021-09-26 20:44:41 -0400},
|
||||
date-modified = {2021-09-26 20:45:03 -0400},
|
||||
title = {IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation},
|
||||
year = {2015}}
|
||||
|
||||
@article{Iserles00an,
|
||||
title = {Lie-group methods},
|
||||
author = {Iserles, Arieh and Munthe-Kaas, Hans Z and
|
||||
N{\o}rsett, Syvert P and Zanna, Antonella},
|
||||
journal = {Acta Numerica 2000},
|
||||
volume = {9},
|
||||
pages = {215--365},
|
||||
year = {2000},
|
||||
publisher = {Cambridge Univ Press}
|
||||
}
|
||||
author = {Iserles, Arieh and Munthe-Kaas, Hans Z and N{\o}rsett, Syvert P and Zanna, Antonella},
|
||||
journal = {Acta Numerica 2000},
|
||||
pages = {215--365},
|
||||
publisher = {Cambridge Univ Press},
|
||||
title = {Lie-group methods},
|
||||
volume = {9},
|
||||
year = {2000}}
|
||||
|
||||
@book{Murray94book,
|
||||
title = {A mathematical introduction to robotic manipulation},
|
||||
author = {Murray, Richard M and Li, Zexiang and Sastry, S
|
||||
Shankar and Sastry, S Shankara},
|
||||
year = {1994},
|
||||
publisher = {CRC press}
|
||||
}
|
||||
author = {Murray, Richard M and Li, Zexiang and Sastry, S Shankar and Sastry, S Shankara},
|
||||
publisher = {CRC press},
|
||||
title = {A mathematical introduction to robotic manipulation},
|
||||
year = {1994}}
|
||||
|
||||
@book{Spivak65book,
|
||||
title = {Calculus on manifolds},
|
||||
author = {Spivak, Michael},
|
||||
volume = {1},
|
||||
year = {1965},
|
||||
publisher = {WA Benjamin New York}
|
||||
}
|
||||
author = {Spivak, Michael},
|
||||
publisher = {WA Benjamin New York},
|
||||
title = {Calculus on manifolds},
|
||||
volume = {1},
|
||||
year = {1965}}
|
||||
|
||||
@phdthesis{Nikolic16thesis,
|
||||
title={Characterisation, calibration, and design of visual-inertial sensor systems for robot navigation},
|
||||
author={Nikolic, Janosch},
|
||||
year={2016},
|
||||
school={ETH Zurich}
|
||||
}
|
||||
|
||||
@book{Simon06book,
|
||||
title={Optimal state estimation: Kalman, H infinity, and nonlinear approaches},
|
||||
author={Simon, Dan},
|
||||
year={2006},
|
||||
publisher={John Wiley \& Sons}
|
||||
}
|
||||
|
||||
@inproceedings{Trawny05report_IndirectKF,
|
||||
title={Indirect Kalman Filter for 3 D Attitude Estimation},
|
||||
author={Nikolas Trawny and Stergios I. Roumeliotis},
|
||||
year={2005}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,23 @@
|
|||
set (excluded_examples
|
||||
elaboratePoint2KalmanFilter.cpp
|
||||
"elaboratePoint2KalmanFilter.cpp"
|
||||
)
|
||||
|
||||
# if GTSAM_ENABLE_BOOST_SERIALIZATION is not set then SolverComparer.cpp will not compile
|
||||
if (NOT GTSAM_ENABLE_BOOST_SERIALIZATION)
|
||||
list (APPEND excluded_examples
|
||||
"SolverComparer.cpp"
|
||||
)
|
||||
endif()
|
||||
|
||||
# Add examples to exclude if GTSAM_USE_BOOST_FEATURES is not set
|
||||
if (NOT GTSAM_USE_BOOST_FEATURES)
|
||||
# add to excluded examples
|
||||
list (APPEND excluded_examples
|
||||
"CombinedImuFactorsExample.cpp"
|
||||
"ImuFactorsExample.cpp"
|
||||
"ShonanAveragingCLI.cpp"
|
||||
"SolverComparer.cpp"
|
||||
)
|
||||
endif()
|
||||
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;${Boost_PROGRAM_OPTIONS_LIBRARY}")
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <boost/make_shared.hpp>
|
||||
|
||||
using namespace gtsam;
|
||||
using namespace gtsam::noiseModel;
|
||||
|
@ -30,8 +29,8 @@ using symbol_shorthand::X;
|
|||
* Unary factor on the unknown pose, resulting from measuring the projection of
|
||||
* a known 3D point in the image
|
||||
*/
|
||||
class ResectioningFactor: public NoiseModelFactor1<Pose3> {
|
||||
typedef NoiseModelFactor1<Pose3> Base;
|
||||
class ResectioningFactor: public NoiseModelFactorN<Pose3> {
|
||||
typedef NoiseModelFactorN<Pose3> Base;
|
||||
|
||||
Cal3_S2::shared_ptr K_; ///< camera's intrinsic parameters
|
||||
Point3 P_; ///< 3D point on the calibration rig
|
||||
|
@ -46,10 +45,9 @@ public:
|
|||
}
|
||||
|
||||
/// evaluate the error
|
||||
Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H =
|
||||
boost::none) const override {
|
||||
Vector evaluateError(const Pose3& pose, OptionalMatrixType H) const override {
|
||||
PinholeCamera<Cal3_S2> camera(pose, *K_);
|
||||
return camera.project(P_, H, boost::none, boost::none) - p_;
|
||||
return camera.project(P_, H, OptionalNone, OptionalNone) - p_;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -71,7 +69,7 @@ int main(int argc, char* argv[]) {
|
|||
/* 2. add factors to the graph */
|
||||
// add measurement factors
|
||||
SharedDiagonal measurementNoise = Diagonal::Sigmas(Vector2(0.5, 0.5));
|
||||
boost::shared_ptr<ResectioningFactor> factor;
|
||||
std::shared_ptr<ResectioningFactor> factor;
|
||||
graph.emplace_shared<ResectioningFactor>(measurementNoise, X(1), calib,
|
||||
Point2(55, 45), Point3(10, 10, 0));
|
||||
graph.emplace_shared<ResectioningFactor>(measurementNoise, X(1), calib,
|
||||
|
|
|
@ -60,13 +60,14 @@ namespace po = boost::program_options;
|
|||
|
||||
po::variables_map parseOptions(int argc, char* argv[]) {
|
||||
po::options_description desc;
|
||||
desc.add_options()("help,h", "produce help message")(
|
||||
"data_csv_path", po::value<string>()->default_value("imuAndGPSdata.csv"),
|
||||
"path to the CSV file with the IMU data")(
|
||||
"output_filename",
|
||||
po::value<string>()->default_value("imuFactorExampleResults.csv"),
|
||||
"path to the result file to use")("use_isam", po::bool_switch(),
|
||||
"use ISAM as the optimizer");
|
||||
desc.add_options()("help,h", "produce help message") // help message
|
||||
("data_csv_path", po::value<string>()->default_value("imuAndGPSdata.csv"),
|
||||
"path to the CSV file with the IMU data") // path to the data file
|
||||
("output_filename",
|
||||
po::value<string>()->default_value("imuFactorExampleResults.csv"),
|
||||
"path to the result file to use") // filename to save results to
|
||||
("use_isam", po::bool_switch(),
|
||||
"use ISAM as the optimizer"); // flag for ISAM optimizer
|
||||
|
||||
po::variables_map vm;
|
||||
po::store(po::parse_command_line(argc, argv, desc), vm);
|
||||
|
@ -94,7 +95,7 @@ Vector10 readInitialState(ifstream& file) {
|
|||
return initial_state;
|
||||
}
|
||||
|
||||
boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
||||
std::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
||||
// We use the sensor specs to build the noise model for the IMU factor.
|
||||
double accel_noise_sigma = 0.0003924;
|
||||
double gyro_noise_sigma = 0.000205689024915;
|
||||
|
@ -106,7 +107,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
|||
I_3x3 * 1e-8; // error committed in integrating position from velocities
|
||||
Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
|
||||
Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
|
||||
Matrix66 bias_acc_omega_int =
|
||||
Matrix66 bias_acc_omega_init =
|
||||
I_6x6 * 1e-5; // error in the bias used for preintegration
|
||||
|
||||
auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
|
||||
|
@ -122,7 +123,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
|||
// PreintegrationCombinedMeasurements params:
|
||||
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
|
||||
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
|
||||
p->biasAccOmegaInt = bias_acc_omega_int;
|
||||
p->biasAccOmegaInt = bias_acc_omega_init;
|
||||
|
||||
return p;
|
||||
}
|
||||
|
|
|
@ -18,9 +18,6 @@
|
|||
#include <gtsam/geometry/CalibratedCamera.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
#include <boost/assign/std/vector.hpp>
|
||||
|
||||
using namespace boost::assign;
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
|
||||
<!DOCTYPE boost_serialization>
|
||||
<boost_serialization signature="serialization::archive" version="17">
|
||||
<graph class_id="0" tracking_level="0" version="0">
|
||||
<boost_serialization signature="serialization::archive" version="19">
|
||||
<data class_id="0" tracking_level="0" version="0">
|
||||
<Base class_id="1" tracking_level="0" version="0">
|
||||
<factors_ class_id="2" tracking_level="0" version="0">
|
||||
<count>32</count>
|
||||
<item_version>1</item_version>
|
||||
<item class_id="3" tracking_level="0" version="1">
|
||||
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
|
||||
<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="1" object_id="_0">
|
||||
<Base class_id="5" tracking_level="0" version="0">
|
||||
<Base class_id="6" tracking_level="0" version="0">
|
||||
<keys_>
|
||||
|
@ -100,9 +100,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_ class_id="11" tracking_level="0" version="1">
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -199,9 +197,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -298,9 +294,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -397,9 +391,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -496,9 +488,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -595,9 +585,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -694,9 +682,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -793,9 +779,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -892,9 +876,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -991,9 +973,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1090,9 +1070,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1189,9 +1167,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1288,9 +1264,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1387,9 +1361,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1486,9 +1458,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1585,9 +1555,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1684,9 +1652,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1783,9 +1749,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1882,9 +1846,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -1981,9 +1943,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2080,9 +2040,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2179,9 +2137,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2278,9 +2234,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2377,9 +2331,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2476,9 +2428,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2575,9 +2525,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2674,9 +2622,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2773,9 +2719,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2872,9 +2816,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -2971,9 +2913,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -3070,9 +3010,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -3402,13 +3340,11 @@
|
|||
<rowEnd_>3</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
</factors_>
|
||||
</Base>
|
||||
</graph>
|
||||
</data>
|
||||
</boost_serialization>
|
||||
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
|
||||
<!DOCTYPE boost_serialization>
|
||||
<boost_serialization signature="serialization::archive" version="17">
|
||||
<graph class_id="0" tracking_level="0" version="0">
|
||||
<boost_serialization signature="serialization::archive" version="19">
|
||||
<data class_id="0" tracking_level="0" version="0">
|
||||
<Base class_id="1" tracking_level="0" version="0">
|
||||
<factors_ class_id="2" tracking_level="0" version="0">
|
||||
<count>2</count>
|
||||
<item_version>1</item_version>
|
||||
<item class_id="3" tracking_level="0" version="1">
|
||||
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
|
||||
<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="1" object_id="_0">
|
||||
<Base class_id="5" tracking_level="0" version="0">
|
||||
<Base class_id="6" tracking_level="0" version="0">
|
||||
<keys_>
|
||||
|
@ -100,9 +100,7 @@
|
|||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_ class_id="11" tracking_level="0" version="1">
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
|
@ -157,13 +155,11 @@
|
|||
<rowEnd_>3</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
<model_null>1</model_null>
|
||||
</px>
|
||||
</item>
|
||||
</factors_>
|
||||
</Base>
|
||||
</graph>
|
||||
</data>
|
||||
</boost_serialization>
|
||||
|
||||
|
|
|
@ -51,8 +51,7 @@ int main(int argc, char **argv) {
|
|||
DiscreteFactorGraph fg(asia);
|
||||
|
||||
// Create solver and eliminate
|
||||
Ordering ordering;
|
||||
ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
|
||||
const Ordering ordering{0, 1, 2, 3, 4, 5, 6, 7};
|
||||
|
||||
// solve
|
||||
auto mpe = fg.optimize();
|
||||
|
|
|
@ -61,7 +61,7 @@ using symbol_shorthand::X; // for poses
|
|||
/* ************************************************************************* */
|
||||
int main(int argc, char *argv[]) {
|
||||
// Define the camera calibration parameters
|
||||
auto K = boost::make_shared<Cal3Fisheye>(
|
||||
auto K = std::make_shared<Cal3Fisheye>(
|
||||
278.66, 278.48, 0.0, 319.75, 241.96, -0.013721808247486035,
|
||||
0.020727425669427896, -0.012786476702685545, 0.0025242267320687625);
|
||||
|
||||
|
|
|
@ -87,9 +87,8 @@ int main(int argc, char* argv[]) {
|
|||
result.print();
|
||||
|
||||
cout << "Detailed results:" << endl;
|
||||
for (auto keyedStatus : result.detail->variableStatus) {
|
||||
const auto& status = keyedStatus.second;
|
||||
PrintKey(keyedStatus.first);
|
||||
for (auto& [key, status] : result.detail->variableStatus) {
|
||||
PrintKey(key);
|
||||
cout << " {" << endl;
|
||||
cout << "reeliminated: " << status.isReeliminated << endl;
|
||||
cout << "relinearized above thresh: " << status.isAboveRelinThreshold
|
||||
|
@ -105,7 +104,7 @@ int main(int argc, char* argv[]) {
|
|||
Values currentEstimate = isam.calculateEstimate();
|
||||
currentEstimate.print("Current estimate: ");
|
||||
|
||||
boost::optional<Point3> pointEstimate = smartFactor->point(currentEstimate);
|
||||
auto pointEstimate = smartFactor->point(currentEstimate);
|
||||
if (pointEstimate) {
|
||||
cout << *pointEstimate << endl;
|
||||
} else {
|
||||
|
|
|
@ -82,7 +82,7 @@ po::variables_map parseOptions(int argc, char* argv[]) {
|
|||
return vm;
|
||||
}
|
||||
|
||||
boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
||||
std::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
||||
// We use the sensor specs to build the noise model for the IMU factor.
|
||||
double accel_noise_sigma = 0.0003924;
|
||||
double gyro_noise_sigma = 0.000205689024915;
|
||||
|
@ -94,7 +94,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
|||
I_3x3 * 1e-8; // error committed in integrating position from velocities
|
||||
Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
|
||||
Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
|
||||
Matrix66 bias_acc_omega_int =
|
||||
Matrix66 bias_acc_omega_init =
|
||||
I_6x6 * 1e-5; // error in the bias used for preintegration
|
||||
|
||||
auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
|
||||
|
@ -110,7 +110,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
|||
// PreintegrationCombinedMeasurements params:
|
||||
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
|
||||
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
|
||||
p->biasAccOmegaInt = bias_acc_omega_int;
|
||||
p->biasAccOmegaInt = bias_acc_omega_init;
|
||||
|
||||
return p;
|
||||
}
|
||||
|
@ -267,7 +267,6 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
if (use_isam) {
|
||||
isam2->update(*graph, initial_values);
|
||||
isam2->update();
|
||||
result = isam2->calculateEstimate();
|
||||
|
||||
// reset the graph
|
||||
|
|
|
@ -115,7 +115,7 @@ int main(int argc, char* argv[]) {
|
|||
Vector6 covvec;
|
||||
covvec << 0.1, 0.1, 0.1, 0.1, 0.1, 0.1;
|
||||
auto cov = noiseModel::Diagonal::Variances(covvec);
|
||||
auto f = boost::make_shared<BetweenFactor<imuBias::ConstantBias> >(
|
||||
auto f = std::make_shared<BetweenFactor<imuBias::ConstantBias> >(
|
||||
b1, b2, imuBias::ConstantBias(), cov);
|
||||
newgraph.add(f);
|
||||
initialEstimate.insert(biasKey, imuBias::ConstantBias());
|
||||
|
|
|
@ -62,29 +62,33 @@ using namespace gtsam;
|
|||
//
|
||||
// The factor will be a unary factor, affecting only a single system variable. It will
|
||||
// also use a standard Gaussian noise model. Hence, we will derive our new factor from
|
||||
// the NoiseModelFactor1.
|
||||
// the NoiseModelFactorN.
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
|
||||
class UnaryFactor: public NoiseModelFactor1<Pose2> {
|
||||
class UnaryFactor: public NoiseModelFactorN<Pose2> {
|
||||
// The factor will hold a measurement consisting of an (X,Y) location
|
||||
// We could do this with a Point2 but here we just use two doubles
|
||||
double mx_, my_;
|
||||
|
||||
public:
|
||||
|
||||
// Provide access to Matrix& version of evaluateError:
|
||||
using NoiseModelFactor1<Pose2>::evaluateError;
|
||||
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<UnaryFactor> shared_ptr;
|
||||
typedef std::shared_ptr<UnaryFactor> shared_ptr;
|
||||
|
||||
// The constructor requires the variable key, the (X, Y) measurement value, and the noise model
|
||||
UnaryFactor(Key j, double x, double y, const SharedNoiseModel& model):
|
||||
NoiseModelFactor1<Pose2>(model, j), mx_(x), my_(y) {}
|
||||
NoiseModelFactorN<Pose2>(model, j), mx_(x), my_(y) {}
|
||||
|
||||
~UnaryFactor() override {}
|
||||
|
||||
// Using the NoiseModelFactor1 base class there are two functions that must be overridden.
|
||||
// Using the NoiseModelFactorN base class there are two functions that must be overridden.
|
||||
// The first is the 'evaluateError' function. This function implements the desired measurement
|
||||
// function, returning a vector of errors when evaluated at the provided variable value. It
|
||||
// must also calculate the Jacobians for this measurement function, if requested.
|
||||
Vector evaluateError(const Pose2& q, boost::optional<Matrix&> H = boost::none) const override {
|
||||
Vector evaluateError(const Pose2& q, OptionalMatrixType H) const override {
|
||||
// The measurement function for a GPS-like measurement h(q) which predicts the measurement (m) is h(q) = q, q = [qx qy qtheta]
|
||||
// The error is then simply calculated as E(q) = h(q) - m:
|
||||
// error_x = q.x - mx
|
||||
|
@ -101,7 +105,7 @@ class UnaryFactor: public NoiseModelFactor1<Pose2> {
|
|||
// circumstances, the following code that employs the default copy constructor should
|
||||
// work fine.
|
||||
gtsam::NonlinearFactor::shared_ptr clone() const override {
|
||||
return boost::static_pointer_cast<gtsam::NonlinearFactor>(
|
||||
return std::static_pointer_cast<gtsam::NonlinearFactor>(
|
||||
gtsam::NonlinearFactor::shared_ptr(new UnaryFactor(*this))); }
|
||||
|
||||
// Additionally, we encourage you the use of unit testing your custom factors,
|
||||
|
|
|
@ -48,16 +48,16 @@ int main(const int argc, const char *argv[]) {
|
|||
Values::shared_ptr initial;
|
||||
bool is3D = false;
|
||||
if (kernelType.compare("none") == 0) {
|
||||
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
std::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
}
|
||||
if (kernelType.compare("huber") == 0) {
|
||||
std::cout << "Using robust kernel: huber " << std::endl;
|
||||
boost::tie(graph, initial) =
|
||||
std::tie(graph, initial) =
|
||||
readG2o(g2oFile, is3D, KernelFunctionTypeHUBER);
|
||||
}
|
||||
if (kernelType.compare("tukey") == 0) {
|
||||
std::cout << "Using robust kernel: tukey " << std::endl;
|
||||
boost::tie(graph, initial) =
|
||||
std::tie(graph, initial) =
|
||||
readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY);
|
||||
}
|
||||
|
||||
|
@ -90,7 +90,7 @@ int main(const int argc, const char *argv[]) {
|
|||
std::cout << "Writing results to file: " << outputFile << std::endl;
|
||||
NonlinearFactorGraph::shared_ptr graphNoKernel;
|
||||
Values::shared_ptr initial2;
|
||||
boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
std::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
writeG2o(*graphNoKernel, result, outputFile);
|
||||
std::cout << "done! " << std::endl;
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ int main (int argc, char** argv) {
|
|||
Values::shared_ptr initial;
|
||||
SharedDiagonal model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.05, 0.05, 5.0 * M_PI / 180.0).finished());
|
||||
string graph_file = findExampleDataFile("w100.graph");
|
||||
boost::tie(graph, initial) = load2D(graph_file, model);
|
||||
std::tie(graph, initial) = load2D(graph_file, model);
|
||||
initial->print("Initial estimate:\n");
|
||||
|
||||
// Add a Gaussian prior on first poses
|
||||
|
|
|
@ -37,7 +37,7 @@ int main(const int argc, const char *argv[]) {
|
|||
|
||||
NonlinearFactorGraph::shared_ptr graph;
|
||||
Values::shared_ptr initial;
|
||||
boost::tie(graph, initial) = readG2o(g2oFile);
|
||||
std::tie(graph, initial) = readG2o(g2oFile);
|
||||
|
||||
// Add prior on the pose having index (key) = 0
|
||||
auto priorModel = noiseModel::Diagonal::Variances(Vector3(1e-6, 1e-6, 1e-8));
|
||||
|
@ -55,7 +55,7 @@ int main(const int argc, const char *argv[]) {
|
|||
std::cout << "Writing results to file: " << outputFile << std::endl;
|
||||
NonlinearFactorGraph::shared_ptr graphNoKernel;
|
||||
Values::shared_ptr initial2;
|
||||
boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
std::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
writeG2o(*graphNoKernel, estimateLago, outputFile);
|
||||
std::cout << "done! " << std::endl;
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ int main(int argc, char** argv) {
|
|||
// addition, the *type* of the iterativeParams decides on the type of
|
||||
// iterative solver, in this case the SPCG (subgraph PCG)
|
||||
parameters.linearSolverType = NonlinearOptimizerParams::Iterative;
|
||||
parameters.iterativeParams = boost::make_shared<SubgraphSolverParameters>();
|
||||
parameters.iterativeParams = std::make_shared<SubgraphSolverParameters>();
|
||||
|
||||
LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
|
||||
Values result = optimizer.optimize();
|
||||
|
|
|
@ -37,15 +37,15 @@ int main(const int argc, const char* argv[]) {
|
|||
NonlinearFactorGraph::shared_ptr graph;
|
||||
Values::shared_ptr initial;
|
||||
bool is3D = true;
|
||||
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
std::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
|
||||
// Add prior on the first key
|
||||
auto priorModel = noiseModel::Diagonal::Variances(
|
||||
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
|
||||
Key firstKey = 0;
|
||||
for (const auto key_value : *initial) {
|
||||
for (const auto key : initial->keys()) {
|
||||
std::cout << "Adding prior to g2o file " << std::endl;
|
||||
firstKey = key_value.key;
|
||||
firstKey = key;
|
||||
graph->addPrior(firstKey, Pose3(), priorModel);
|
||||
break;
|
||||
}
|
||||
|
@ -67,17 +67,15 @@ int main(const int argc, const char* argv[]) {
|
|||
std::cout << "Writing results to file: " << outputFile << std::endl;
|
||||
NonlinearFactorGraph::shared_ptr graphNoKernel;
|
||||
Values::shared_ptr initial2;
|
||||
boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
std::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
writeG2o(*graphNoKernel, result, outputFile);
|
||||
std::cout << "done! " << std::endl;
|
||||
}
|
||||
|
||||
// Calculate and print marginal covariances for all variables
|
||||
Marginals marginals(*graph, result);
|
||||
for (const auto key_value : result) {
|
||||
auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value);
|
||||
if (!p) continue;
|
||||
std::cout << marginals.marginalCovariance(key_value.key) << endl;
|
||||
for (const auto& [key, pose] : result.extract<Pose3>()) {
|
||||
std::cout << marginals.marginalCovariance(key) << endl;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ int main(const int argc, const char *argv[]) {
|
|||
NonlinearFactorGraph::shared_ptr graph;
|
||||
Values::shared_ptr initial;
|
||||
bool is3D = true;
|
||||
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
std::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
|
||||
bool add = false;
|
||||
Key firstKey = 8646911284551352320;
|
||||
|
@ -55,27 +55,27 @@ int main(const int argc, const char *argv[]) {
|
|||
std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl;
|
||||
// Additional: rewrite input with simplified keys 0,1,...
|
||||
Values simpleInitial;
|
||||
for(const auto key_value: *initial) {
|
||||
for (const auto k : initial->keys()) {
|
||||
Key key;
|
||||
if(add)
|
||||
key = key_value.key + firstKey;
|
||||
if (add)
|
||||
key = k + firstKey;
|
||||
else
|
||||
key = key_value.key - firstKey;
|
||||
key = k - firstKey;
|
||||
|
||||
simpleInitial.insert(key, initial->at(key_value.key));
|
||||
simpleInitial.insert(key, initial->at(k));
|
||||
}
|
||||
NonlinearFactorGraph simpleGraph;
|
||||
for(const boost::shared_ptr<NonlinearFactor>& factor: *graph) {
|
||||
boost::shared_ptr<BetweenFactor<Pose3> > pose3Between =
|
||||
boost::dynamic_pointer_cast<BetweenFactor<Pose3> >(factor);
|
||||
for(const std::shared_ptr<NonlinearFactor>& factor: *graph) {
|
||||
std::shared_ptr<BetweenFactor<Pose3> > pose3Between =
|
||||
std::dynamic_pointer_cast<BetweenFactor<Pose3> >(factor);
|
||||
if (pose3Between){
|
||||
Key key1, key2;
|
||||
if(add){
|
||||
key1 = pose3Between->key1() + firstKey;
|
||||
key2 = pose3Between->key2() + firstKey;
|
||||
key1 = pose3Between->key<1>() + firstKey;
|
||||
key2 = pose3Between->key<2>() + firstKey;
|
||||
}else{
|
||||
key1 = pose3Between->key1() - firstKey;
|
||||
key2 = pose3Between->key2() - firstKey;
|
||||
key1 = pose3Between->key<1>() - firstKey;
|
||||
key2 = pose3Between->key<2>() - firstKey;
|
||||
}
|
||||
NonlinearFactor::shared_ptr simpleFactor(
|
||||
new BetweenFactor<Pose3>(key1, key2, pose3Between->measured(), pose3Between->noiseModel()));
|
||||
|
|
|
@ -36,15 +36,15 @@ int main(const int argc, const char* argv[]) {
|
|||
NonlinearFactorGraph::shared_ptr graph;
|
||||
Values::shared_ptr initial;
|
||||
bool is3D = true;
|
||||
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
std::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
|
||||
// Add prior on the first key
|
||||
auto priorModel = noiseModel::Diagonal::Variances(
|
||||
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
|
||||
Key firstKey = 0;
|
||||
for (const auto key_value : *initial) {
|
||||
for (const auto key : initial->keys()) {
|
||||
std::cout << "Adding prior to g2o file " << std::endl;
|
||||
firstKey = key_value.key;
|
||||
firstKey = key;
|
||||
graph->addPrior(firstKey, Pose3(), priorModel);
|
||||
break;
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ int main(const int argc, const char* argv[]) {
|
|||
std::cout << "Writing results to file: " << outputFile << std::endl;
|
||||
NonlinearFactorGraph::shared_ptr graphNoKernel;
|
||||
Values::shared_ptr initial2;
|
||||
boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
std::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
writeG2o(*graphNoKernel, result, outputFile);
|
||||
std::cout << "done! " << std::endl;
|
||||
}
|
||||
|
|
|
@ -36,15 +36,15 @@ int main(const int argc, const char* argv[]) {
|
|||
NonlinearFactorGraph::shared_ptr graph;
|
||||
Values::shared_ptr initial;
|
||||
bool is3D = true;
|
||||
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
std::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
|
||||
// Add prior on the first key
|
||||
auto priorModel = noiseModel::Diagonal::Variances(
|
||||
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
|
||||
Key firstKey = 0;
|
||||
for (const auto key_value : *initial) {
|
||||
for (const auto key : initial->keys()) {
|
||||
std::cout << "Adding prior to g2o file " << std::endl;
|
||||
firstKey = key_value.key;
|
||||
firstKey = key;
|
||||
graph->addPrior(firstKey, Pose3(), priorModel);
|
||||
break;
|
||||
}
|
||||
|
@ -60,7 +60,7 @@ int main(const int argc, const char* argv[]) {
|
|||
std::cout << "Writing results to file: " << outputFile << std::endl;
|
||||
NonlinearFactorGraph::shared_ptr graphNoKernel;
|
||||
Values::shared_ptr initial2;
|
||||
boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
std::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
writeG2o(*graphNoKernel, initialization, outputFile);
|
||||
std::cout << "done! " << std::endl;
|
||||
}
|
||||
|
|
|
@ -36,15 +36,15 @@ int main(const int argc, const char* argv[]) {
|
|||
NonlinearFactorGraph::shared_ptr graph;
|
||||
Values::shared_ptr initial;
|
||||
bool is3D = true;
|
||||
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
std::tie(graph, initial) = readG2o(g2oFile, is3D);
|
||||
|
||||
// Add prior on the first key
|
||||
auto priorModel = noiseModel::Diagonal::Variances(
|
||||
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
|
||||
Key firstKey = 0;
|
||||
for (const auto key_value : *initial) {
|
||||
for (const auto key : initial->keys()) {
|
||||
std::cout << "Adding prior to g2o file " << std::endl;
|
||||
firstKey = key_value.key;
|
||||
firstKey = key;
|
||||
graph->addPrior(firstKey, Pose3(), priorModel);
|
||||
break;
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ int main(const int argc, const char* argv[]) {
|
|||
std::cout << "Writing results to file: " << outputFile << std::endl;
|
||||
NonlinearFactorGraph::shared_ptr graphNoKernel;
|
||||
Values::shared_ptr initial2;
|
||||
boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
std::tie(graphNoKernel, initial2) = readG2o(g2oFile);
|
||||
writeG2o(*graphNoKernel, initialization, outputFile);
|
||||
std::cout << "done! " << std::endl;
|
||||
}
|
||||
|
|
|
@ -33,7 +33,6 @@ The following examples illustrate some concepts from Georgia Tech's research pap
|
|||
## 2D Pose SLAM
|
||||
|
||||
* **LocalizationExample.cpp**: modeling robot motion
|
||||
* **LocalizationExample2.cpp**: example with GPS like measurements
|
||||
* **Pose2SLAMExample**: A 2D Pose SLAM example using the predefined typedefs in gtsam/slam/pose2SLAM.h
|
||||
* **Pose2SLAMExample_advanced**: same, but uses an Optimizer object
|
||||
* **Pose2SLAMwSPCG**: solve a simple 3 by 3 grid of Pose2 SLAM problem by using easy SPCG interface
|
||||
|
|
|
@ -10,62 +10,81 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file RangeISAMExample_plaza1.cpp
|
||||
* @file RangeISAMExample_plaza2.cpp
|
||||
* @brief A 2D Range SLAM example
|
||||
* @date June 20, 2013
|
||||
* @author FRank Dellaert
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
// Both relative poses and recovered trajectory poses will be stored as Pose2 objects
|
||||
// Both relative poses and recovered trajectory poses will be stored as Pose2.
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
using gtsam::Pose2;
|
||||
|
||||
// Each variable in the system (poses and landmarks) must be identified with a unique key.
|
||||
// We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
|
||||
// Here we will use Symbols
|
||||
// gtsam::Vectors are dynamic Eigen vectors, Vector3 is statically sized.
|
||||
#include <gtsam/base/Vector.h>
|
||||
using gtsam::Vector;
|
||||
using gtsam::Vector3;
|
||||
|
||||
// Unknown landmarks are of type Point2 (which is just a 2D Eigen vector).
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
using gtsam::Point2;
|
||||
|
||||
// Each variable in the system (poses and landmarks) must be identified with a
|
||||
// unique key. We can either use simple integer keys (1, 2, 3, ...) or symbols
|
||||
// (X1, X2, L1). Here we will use Symbols.
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
using gtsam::Symbol;
|
||||
|
||||
// We want to use iSAM2 to solve the range-SLAM problem incrementally
|
||||
// We want to use iSAM2 to solve the range-SLAM problem incrementally.
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
|
||||
// iSAM2 requires as input a set set of new factors to be added stored in a factor graph,
|
||||
// and initial guesses for any new variables used in the added factors
|
||||
// iSAM2 requires as input a set of new factors to be added, stored in a
|
||||
// factor graph, and initial guesses for any new variables in the added factors.
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
|
||||
// We will use a non-liear solver to batch-inituialize from the first 150 frames
|
||||
// We will use a non-linear solver to batch-initialize from the first 150 frames
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
|
||||
// In GTSAM, measurement functions are represented as 'factors'. Several common factors
|
||||
// have been provided with the library for solving robotics SLAM problems.
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
// Measurement functions are represented as 'factors'. Several common factors
|
||||
// have been provided with the library for solving robotics SLAM problems:
|
||||
#include <gtsam/sam/RangeFactor.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
// Standard headers, added last, so we know headers above work on their own
|
||||
// Timing, with functions below, provides nice facilities to benchmark.
|
||||
#include <gtsam/base/timing.h>
|
||||
using gtsam::tictoc_print_;
|
||||
|
||||
// Standard headers, added last, so we know headers above work on their own.
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
namespace NM = gtsam::noiseModel;
|
||||
|
||||
// data available at http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/
|
||||
// Datafile format (from http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/log.html)
|
||||
// Data is the second UWB ranging dataset, B2 or "plaza 2", from
|
||||
// "Navigating with Ranging Radios: Five Data Sets with Ground Truth"
|
||||
// by Joseph Djugash, Bradley Hamner, and Stephan Roth
|
||||
// https://www.ri.cmu.edu/pub_files/2009/9/Final_5datasetsRangingRadios.pdf
|
||||
|
||||
// load the odometry
|
||||
// DR: Odometry Input (delta distance traveled and delta heading change)
|
||||
// Time (sec) Delta Dist. Trav. (m) Delta Heading (rad)
|
||||
typedef pair<double, Pose2> TimedOdometry;
|
||||
list<TimedOdometry> readOdometry() {
|
||||
list<TimedOdometry> odometryList;
|
||||
string data_file = findExampleDataFile("Plaza2_DR.txt");
|
||||
ifstream is(data_file.c_str());
|
||||
// Time (sec) Delta Distance Traveled (m) Delta Heading (rad)
|
||||
using TimedOdometry = std::pair<double, Pose2>;
|
||||
std::list<TimedOdometry> readOdometry() {
|
||||
std::list<TimedOdometry> odometryList;
|
||||
std::string data_file = gtsam::findExampleDataFile("Plaza2_DR.txt");
|
||||
std::ifstream is(data_file.c_str());
|
||||
|
||||
while (is) {
|
||||
double t, distance_traveled, delta_heading;
|
||||
is >> t >> distance_traveled >> delta_heading;
|
||||
odometryList.push_back(
|
||||
TimedOdometry(t, Pose2(distance_traveled, 0, delta_heading)));
|
||||
odometryList.emplace_back(t, Pose2(distance_traveled, 0, delta_heading));
|
||||
}
|
||||
is.clear(); /* clears the end-of-file and error flags */
|
||||
return odometryList;
|
||||
|
@ -73,90 +92,85 @@ list<TimedOdometry> readOdometry() {
|
|||
|
||||
// load the ranges from TD
|
||||
// Time (sec) Sender / Antenna ID Receiver Node ID Range (m)
|
||||
typedef boost::tuple<double, size_t, double> RangeTriple;
|
||||
vector<RangeTriple> readTriples() {
|
||||
vector<RangeTriple> triples;
|
||||
string data_file = findExampleDataFile("Plaza2_TD.txt");
|
||||
ifstream is(data_file.c_str());
|
||||
using RangeTriple = std::tuple<double, size_t, double>;
|
||||
std::vector<RangeTriple> readTriples() {
|
||||
std::vector<RangeTriple> triples;
|
||||
std::string data_file = gtsam::findExampleDataFile("Plaza2_TD.txt");
|
||||
std::ifstream is(data_file.c_str());
|
||||
|
||||
while (is) {
|
||||
double t, sender, range;
|
||||
size_t receiver;
|
||||
double t, range, sender, receiver;
|
||||
is >> t >> sender >> receiver >> range;
|
||||
triples.push_back(RangeTriple(t, receiver, range));
|
||||
triples.emplace_back(t, receiver, range);
|
||||
}
|
||||
is.clear(); /* clears the end-of-file and error flags */
|
||||
return triples;
|
||||
}
|
||||
|
||||
// main
|
||||
int main (int argc, char** argv) {
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
// load Plaza2 data
|
||||
list<TimedOdometry> odometry = readOdometry();
|
||||
// size_t M = odometry.size();
|
||||
std::list<TimedOdometry> odometry = readOdometry();
|
||||
size_t M = odometry.size();
|
||||
std::cout << "Read " << M << " odometry entries." << std::endl;
|
||||
|
||||
vector<RangeTriple> triples = readTriples();
|
||||
std::vector<RangeTriple> triples = readTriples();
|
||||
size_t K = triples.size();
|
||||
std::cout << "Read " << K << " range triples." << std::endl;
|
||||
|
||||
// parameters
|
||||
size_t minK = 150; // minimum number of range measurements to process initially
|
||||
size_t incK = 25; // minimum number of range measurements to process after
|
||||
bool groundTruth = false;
|
||||
size_t minK =
|
||||
150; // minimum number of range measurements to process initially
|
||||
size_t incK = 25; // minimum number of range measurements to process after
|
||||
bool robust = true;
|
||||
|
||||
// Set Noise parameters
|
||||
Vector priorSigmas = Vector3(1,1,M_PI);
|
||||
Vector priorSigmas = Vector3(1, 1, M_PI);
|
||||
Vector odoSigmas = Vector3(0.05, 0.01, 0.1);
|
||||
double sigmaR = 100; // range standard deviation
|
||||
const NM::Base::shared_ptr // all same type
|
||||
priorNoise = NM::Diagonal::Sigmas(priorSigmas), //prior
|
||||
odoNoise = NM::Diagonal::Sigmas(odoSigmas), // odometry
|
||||
gaussian = NM::Isotropic::Sigma(1, sigmaR), // non-robust
|
||||
tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15), gaussian), //robust
|
||||
rangeNoise = robust ? tukey : gaussian;
|
||||
double sigmaR = 100; // range standard deviation
|
||||
const NM::Base::shared_ptr // all same type
|
||||
priorNoise = NM::Diagonal::Sigmas(priorSigmas), // prior
|
||||
looseNoise = NM::Isotropic::Sigma(2, 1000), // loose LM prior
|
||||
odoNoise = NM::Diagonal::Sigmas(odoSigmas), // odometry
|
||||
gaussian = NM::Isotropic::Sigma(1, sigmaR), // non-robust
|
||||
tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15),
|
||||
gaussian), // robust
|
||||
rangeNoise = robust ? tukey : gaussian;
|
||||
|
||||
// Initialize iSAM
|
||||
ISAM2 isam;
|
||||
gtsam::ISAM2 isam;
|
||||
|
||||
// Add prior on first pose
|
||||
Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120,
|
||||
M_PI - 2.02108900000000);
|
||||
NonlinearFactorGraph newFactors;
|
||||
Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120, M_PI - 2.021089);
|
||||
gtsam::NonlinearFactorGraph newFactors;
|
||||
newFactors.addPrior(0, pose0, priorNoise);
|
||||
Values initial;
|
||||
gtsam::Values initial;
|
||||
initial.insert(0, pose0);
|
||||
|
||||
// initialize points
|
||||
if (groundTruth) { // from TL file
|
||||
initial.insert(symbol('L', 1), Point2(-68.9265, 18.3778));
|
||||
initial.insert(symbol('L', 6), Point2(-37.5805, 69.2278));
|
||||
initial.insert(symbol('L', 0), Point2(-33.6205, 26.9678));
|
||||
initial.insert(symbol('L', 5), Point2(1.7095, -5.8122));
|
||||
} else { // drawn from sigma=1 Gaussian in matlab version
|
||||
initial.insert(symbol('L', 1), Point2(3.5784, 2.76944));
|
||||
initial.insert(symbol('L', 6), Point2(-1.34989, 3.03492));
|
||||
initial.insert(symbol('L', 0), Point2(0.725404, -0.0630549));
|
||||
initial.insert(symbol('L', 5), Point2(0.714743, -0.204966));
|
||||
}
|
||||
// We will initialize landmarks randomly, and keep track of which landmarks we
|
||||
// already added with a set.
|
||||
std::mt19937_64 rng;
|
||||
std::normal_distribution<double> normal(0.0, 100.0);
|
||||
std::set<Symbol> initializedLandmarks;
|
||||
|
||||
// set some loop variables
|
||||
size_t i = 1; // step counter
|
||||
size_t k = 0; // range measurement counter
|
||||
size_t i = 1; // step counter
|
||||
size_t k = 0; // range measurement counter
|
||||
bool initialized = false;
|
||||
Pose2 lastPose = pose0;
|
||||
size_t countK = 0;
|
||||
|
||||
// Loop over odometry
|
||||
gttic_(iSAM);
|
||||
for(const TimedOdometry& timedOdometry: odometry) {
|
||||
//--------------------------------- odometry loop -----------------------------------------
|
||||
for (const TimedOdometry& timedOdometry : odometry) {
|
||||
//--------------------------------- odometry loop --------------------------
|
||||
double t;
|
||||
Pose2 odometry;
|
||||
boost::tie(t, odometry) = timedOdometry;
|
||||
std::tie(t, odometry) = timedOdometry;
|
||||
|
||||
// add odometry factor
|
||||
newFactors.push_back(BetweenFactor<Pose2>(i-1, i, odometry, odoNoise));
|
||||
newFactors.emplace_shared<gtsam::BetweenFactor<Pose2>>(i - 1, i, odometry,
|
||||
odoNoise);
|
||||
|
||||
// predict pose and add as initial estimate
|
||||
Pose2 predictedPose = lastPose.compose(odometry);
|
||||
|
@ -164,19 +178,32 @@ int main (int argc, char** argv) {
|
|||
initial.insert(i, predictedPose);
|
||||
|
||||
// Check if there are range factors to be added
|
||||
while (k < K && t >= boost::get<0>(triples[k])) {
|
||||
size_t j = boost::get<1>(triples[k]);
|
||||
double range = boost::get<2>(triples[k]);
|
||||
newFactors.push_back(RangeFactor<Pose2, Point2>(i, symbol('L', j), range,rangeNoise));
|
||||
while (k < K && t >= std::get<0>(triples[k])) {
|
||||
size_t j = std::get<1>(triples[k]);
|
||||
Symbol landmark_key('L', j);
|
||||
double range = std::get<2>(triples[k]);
|
||||
newFactors.emplace_shared<gtsam::RangeFactor<Pose2, Point2>>(
|
||||
i, landmark_key, range, rangeNoise);
|
||||
if (initializedLandmarks.count(landmark_key) == 0) {
|
||||
std::cout << "adding landmark " << j << std::endl;
|
||||
double x = normal(rng), y = normal(rng);
|
||||
initial.insert(landmark_key, Point2(x, y));
|
||||
initializedLandmarks.insert(landmark_key);
|
||||
// We also add a very loose prior on the landmark in case there is only
|
||||
// one sighting, which cannot fully determine the landmark.
|
||||
newFactors.emplace_shared<gtsam::PriorFactor<Point2>>(
|
||||
landmark_key, Point2(0, 0), looseNoise);
|
||||
}
|
||||
k = k + 1;
|
||||
countK = countK + 1;
|
||||
}
|
||||
|
||||
// Check whether to update iSAM 2
|
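// Strategy: wait until at least minK range measurements have arrived, then
// re-linearize whenever incK new ranges have accumulated; the very first
// update is a full Levenberg-Marquardt batch solve, all later ones incremental.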
||||
if ((k > minK) && (countK > incK)) {
|
||||
if (!initialized) { // Do a full optimize for first minK ranges
|
||||
if (!initialized) { // Do a full optimize for first minK ranges
|
||||
std::cout << "Initializing at time " << k << std::endl;
|
||||
gttic_(batchInitialization);
|
||||
LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
|
||||
gtsam::LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
|
||||
initial = batchOptimizer.optimize();
|
||||
gttoc_(batchInitialization);
|
||||
initialized = true;
|
||||
|
@ -185,21 +212,27 @@ int main (int argc, char** argv) {
|
|||
isam.update(newFactors, initial);
|
||||
gttoc_(update);
|
||||
gttic_(calculateEstimate);
|
||||
Values result = isam.calculateEstimate();
|
||||
gtsam::Values result = isam.calculateEstimate();
|
||||
gttoc_(calculateEstimate);
|
||||
lastPose = result.at<Pose2>(i);
|
||||
newFactors = NonlinearFactorGraph();
|
||||
initial = Values();
|
||||
newFactors = gtsam::NonlinearFactorGraph();
|
||||
initial = gtsam::Values();
|
||||
countK = 0;
|
||||
}
|
||||
i += 1;
|
||||
//--------------------------------- odometry loop -----------------------------------------
|
||||
} // end for
|
||||
//--------------------------------- odometry loop --------------------------
|
||||
} // end for
|
||||
gttoc_(iSAM);
|
||||
|
||||
// Print timings
|
||||
tictoc_print_();
|
||||
|
||||
// Print optimized landmarks:
|
||||
gtsam::Values finalResult = isam.calculateEstimate();
|
||||
for (auto&& landmark_key : initializedLandmarks) {
|
||||
Point2 p = finalResult.at<Point2>(landmark_key);
|
||||
std::cout << landmark_key << ":" << p.transpose() << "\n";
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
|
|
|
@ -26,10 +26,11 @@
|
|||
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
|
||||
|
||||
// Header order is close to far
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
|
@ -48,8 +49,7 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
// Load the SfM data from file
|
||||
SfmData mydata = SfmData::FromBalFile(filename);
|
||||
cout << boost::format("read %1% tracks on %2% cameras\n") %
|
||||
mydata.numberTracks() % mydata.numberCameras();
|
||||
cout << "read " << mydata.numberTracks() << " tracks on " << mydata.numberCameras() << " cameras" << endl;
|
||||
|
||||
// Create a factor graph
|
||||
ExpressionFactorGraph graph;
|
||||
|
@ -77,9 +77,7 @@ int main(int argc, char* argv[]) {
|
|||
for (const SfmTrack& track : mydata.tracks) {
|
||||
// Leaf expression for j^th point
|
||||
Point3_ point_('p', j);
|
||||
for (const SfmMeasurement& m : track.measurements) {
|
||||
size_t i = m.first;
|
||||
Point2 uv = m.second;
|
||||
for (const auto& [i, uv] : track.measurements) {
|
||||
// Leaf expression for i^th camera
|
||||
Expression<SfmCamera> camera_(C(i));
|
||||
// Below an expression for the prediction of the measurement:
|
||||
|
|
|
@ -112,12 +112,12 @@ int main(int argc, char* argv[]) {
|
|||
for (size_t j = 0; j < points.size(); ++j) {
|
||||
|
||||
// The graph stores Factor shared_ptrs, so we cast back to a SmartFactor first
|
||||
SmartFactor::shared_ptr smart = boost::dynamic_pointer_cast<SmartFactor>(graph[j]);
|
||||
SmartFactor::shared_ptr smart = std::dynamic_pointer_cast<SmartFactor>(graph[j]);
|
||||
if (smart) {
|
||||
// The output of point() is in boost::optional<Point3>, as sometimes
|
||||
// The output of point() is a std::optional<Point3>, as sometimes
|
||||
// the triangulation operation inside smart factor will encounter degeneracy.
|
||||
boost::optional<Point3> point = smart->point(result);
|
||||
if (point) // ignore if boost::optional return nullptr
|
||||
auto point = smart->point(result);
|
||||
if (point)  // skip if the std::optional is empty
|
||||
landmark_result.insert(j, *point);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -93,9 +93,9 @@ int main(int argc, char* argv[]) {
|
|||
parameters.relativeErrorTol = 1e-10;
|
||||
parameters.maxIterations = 500;
|
||||
PCGSolverParameters::shared_ptr pcg =
|
||||
boost::make_shared<PCGSolverParameters>();
|
||||
std::make_shared<PCGSolverParameters>();
|
||||
pcg->preconditioner_ =
|
||||
boost::make_shared<BlockJacobiPreconditionerParameters>();
|
||||
std::make_shared<BlockJacobiPreconditionerParameters>();
|
||||
// Following is crucial:
|
||||
pcg->setEpsilon_abs(1e-10);
|
||||
pcg->setEpsilon_rel(1e-10);
|
||||
|
@ -108,10 +108,10 @@ int main(int argc, char* argv[]) {
|
|||
result.print("Final results:\n");
|
||||
Values landmark_result;
|
||||
for (size_t j = 0; j < points.size(); ++j) {
|
||||
auto smart = boost::dynamic_pointer_cast<SmartFactor>(graph[j]);
|
||||
auto smart = std::dynamic_pointer_cast<SmartFactor>(graph[j]);
|
||||
if (smart) {
|
||||
boost::optional<Point3> point = smart->point(result);
|
||||
if (point) // ignore if boost::optional return nullptr
|
||||
std::optional<Point3> point = smart->point(result);
|
||||
if (point)  // skip if the std::optional is empty
|
||||
landmark_result.insert(j, *point);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,12 +16,13 @@
|
|||
*/
|
||||
|
||||
// For an explanation of headers, see SFMExample.cpp
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
|
@ -43,7 +44,7 @@ int main (int argc, char* argv[]) {
|
|||
|
||||
// Load the SfM data from file
|
||||
SfmData mydata = SfmData::FromBalFile(filename);
|
||||
cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.numberTracks() % mydata.numberCameras();
|
||||
cout << "read " << mydata.numberTracks() << " tracks on " << mydata.numberCameras() << " cameras" << endl;
|
||||
|
||||
// Create a factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
|
@ -55,9 +56,7 @@ int main (int argc, char* argv[]) {
|
|||
// Add measurements to the factor graph
|
||||
size_t j = 0;
|
||||
for(const SfmTrack& track: mydata.tracks) {
|
||||
for(const SfmMeasurement& m: track.measurements) {
|
||||
size_t i = m.first;
|
||||
Point2 uv = m.second;
|
||||
for (const auto& [i, uv] : track.measurements) {
|
||||
graph.emplace_shared<MyFactor>(uv, noise, C(i), P(j)); // note use of shorthand symbols C and P
|
||||
}
|
||||
j += 1;
|
||||
|
|
|
@ -17,14 +17,13 @@
|
|||
*/
|
||||
|
||||
// For an explanation of headers, see SFMExample.cpp
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/base/timing.h>
|
||||
|
||||
#include <vector>
|
||||
|
@ -47,8 +46,7 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
// Load the SfM data from file
|
||||
SfmData mydata = SfmData::FromBalFile(filename);
|
||||
cout << boost::format("read %1% tracks on %2% cameras\n") %
|
||||
mydata.numberTracks() % mydata.numberCameras();
|
||||
cout << "read " << mydata.numberTracks() << " tracks on " << mydata.numberCameras() << " cameras" << endl;
|
||||
|
||||
// Create a factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
|
@ -59,9 +57,7 @@ int main(int argc, char* argv[]) {
|
|||
// Add measurements to the factor graph
|
||||
size_t j = 0;
|
||||
for (const SfmTrack& track : mydata.tracks) {
|
||||
for (const SfmMeasurement& m : track.measurements) {
|
||||
size_t i = m.first;
|
||||
Point2 uv = m.second;
|
||||
for (const auto& [i, uv] : track.measurements) {
|
||||
graph.emplace_shared<MyFactor>(
|
||||
uv, noise, C(i), P(j)); // note use of shorthand symbols C and P
|
||||
}
|
||||
|
@ -130,9 +126,9 @@ int main(int argc, char* argv[]) {
|
|||
cout << endl << endl;
|
||||
|
||||
cout << "Time comparison by solving " << filename << " results:" << endl;
|
||||
cout << boost::format("%1% point tracks and %2% cameras\n") %
|
||||
mydata.numberTracks() % mydata.numberCameras()
|
||||
<< endl;
|
||||
|
||||
cout << mydata.numberTracks() << " point tracks and " << mydata.numberCameras()
|
||||
<< " cameras" << endl;
|
||||
|
||||
tictoc_print_();
|
||||
}
|
||||
|
|
|
@ -22,6 +22,8 @@
|
|||
* Passing function arguments allows one to specify an initial position, a pose increment and a step count.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
// As this is a full 3D problem, we will use Pose3 variables to represent the camera
|
||||
// positions and Point3 variables (x, y, z) to represent the landmark coordinates.
|
||||
// Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).
|
||||
|
@ -66,4 +68,4 @@ std::vector<gtsam::Pose3> createPoses(
|
|||
}
|
||||
|
||||
return poses;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -103,7 +103,7 @@ int main(int argc, char* argv[]) {
|
|||
auto result = shonan.run(initial, pMin);
|
||||
|
||||
// Parse file again to set up translation problem, adding a prior
|
||||
boost::tie(inputGraph, posesInFile) = load2D(inputFile);
|
||||
std::tie(inputGraph, posesInFile) = load2D(inputFile);
|
||||
auto priorModel = noiseModel::Unit::Create(3);
|
||||
inputGraph->addPrior(0, posesInFile->at<Pose2>(0), priorModel);
|
||||
|
||||
|
@ -119,7 +119,7 @@ int main(int argc, char* argv[]) {
|
|||
auto result = shonan.run(initial, pMin);
|
||||
|
||||
// Parse file again to set up translation problem, adding a prior
|
||||
boost::tie(inputGraph, posesInFile) = load3D(inputFile);
|
||||
std::tie(inputGraph, posesInFile) = load3D(inputFile);
|
||||
auto priorModel = noiseModel::Unit::Create(6);
|
||||
inputGraph->addPrior(0, posesInFile->at<Pose3>(0), priorModel);
|
||||
|
||||
|
|
|
@ -49,8 +49,6 @@
|
|||
#include <boost/archive/binary_oarchive.hpp>
|
||||
#include <boost/program_options.hpp>
|
||||
#include <boost/range/algorithm/set_algorithm.hpp>
|
||||
#include <boost/range/adaptor/reversed.hpp>
|
||||
#include <boost/serialization/export.hpp>
|
||||
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
@ -69,8 +67,8 @@ namespace br = boost::range;
|
|||
|
||||
typedef Pose2 Pose;
|
||||
|
||||
typedef NoiseModelFactor1<Pose> NM1;
|
||||
typedef NoiseModelFactor2<Pose,Pose> NM2;
|
||||
typedef NoiseModelFactorN<Pose> NM1;
|
||||
typedef NoiseModelFactorN<Pose,Pose> NM2;
|
||||
typedef BearingRangeFactor<Pose,Point2> BR;
|
||||
|
||||
double chi2_red(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config) {
|
||||
|
@ -79,7 +77,7 @@ double chi2_red(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& c
|
|||
// the factor graph already includes a factor for the prior/equality constraint.
|
||||
// double dof = graph.size() - config.size();
|
||||
int graph_dim = 0;
|
||||
for(const boost::shared_ptr<gtsam::NonlinearFactor>& nlf: graph) {
|
||||
for(const std::shared_ptr<gtsam::NonlinearFactor>& nlf: graph) {
|
||||
graph_dim += (int)nlf->dim();
|
||||
}
|
||||
double dof = double(graph_dim) - double(config.dim()); // kaess: changed to dim
|
||||
|
@ -259,9 +257,9 @@ void runIncremental()
|
|||
while(nextMeasurement < datasetMeasurements.size())
|
||||
{
|
||||
if(BetweenFactor<Pose>::shared_ptr factor =
|
||||
boost::dynamic_pointer_cast<BetweenFactor<Pose> >(datasetMeasurements[nextMeasurement]))
|
||||
std::dynamic_pointer_cast<BetweenFactor<Pose> >(datasetMeasurements[nextMeasurement]))
|
||||
{
|
||||
Key key1 = factor->key1(), key2 = factor->key2();
|
||||
Key key1 = factor->key<1>(), key2 = factor->key<2>();
|
||||
if(((int)key1 >= firstStep && key1 < key2) || ((int)key2 >= firstStep && key2 < key1)) {
|
||||
// We found an odometry starting at firstStep
|
||||
firstPose = std::min(key1, key2);
|
||||
|
@ -310,14 +308,14 @@ void runIncremental()
|
|||
NonlinearFactor::shared_ptr measurementf = datasetMeasurements[nextMeasurement];
|
||||
|
||||
if(BetweenFactor<Pose>::shared_ptr factor =
|
||||
boost::dynamic_pointer_cast<BetweenFactor<Pose> >(measurementf))
|
||||
std::dynamic_pointer_cast<BetweenFactor<Pose> >(measurementf))
|
||||
{
|
||||
// Stop collecting measurements that are for future steps
|
||||
if(factor->key1() > step || factor->key2() > step)
|
||||
if(factor->key<1>() > step || factor->key<2>() > step)
|
||||
break;
|
||||
|
||||
// Require that one of the nodes is the current one
|
||||
if(factor->key1() != step && factor->key2() != step)
|
||||
if(factor->key<1>() != step && factor->key<2>() != step)
|
||||
throw runtime_error("Problem in data file, out-of-sequence measurements");
|
||||
|
||||
// Add a new factor
|
||||
|
@ -325,28 +323,28 @@ void runIncremental()
|
|||
const auto& measured = factor->measured();
|
||||
|
||||
// Initialize the new variable
|
||||
if(factor->key1() > factor->key2()) {
|
||||
if(!newVariables.exists(factor->key1())) { // Only need to check newVariables since loop closures come after odometry
|
||||
if(factor->key<1>() > factor->key<2>()) {
|
||||
if(!newVariables.exists(factor->key<1>())) { // Only need to check newVariables since loop closures come after odometry
|
||||
if(step == 1)
|
||||
newVariables.insert(factor->key1(), measured.inverse());
|
||||
newVariables.insert(factor->key<1>(), measured.inverse());
|
||||
else {
|
||||
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key2());
|
||||
newVariables.insert(factor->key1(), prevPose * measured.inverse());
|
||||
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key<2>());
|
||||
newVariables.insert(factor->key<1>(), prevPose * measured.inverse());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if(!newVariables.exists(factor->key2())) { // Only need to check newVariables since loop closures come after odometry
|
||||
if(!newVariables.exists(factor->key<2>())) { // Only need to check newVariables since loop closures come after odometry
|
||||
if(step == 1)
|
||||
newVariables.insert(factor->key2(), measured);
|
||||
newVariables.insert(factor->key<2>(), measured);
|
||||
else {
|
||||
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key1());
|
||||
newVariables.insert(factor->key2(), prevPose * measured);
|
||||
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key<1>());
|
||||
newVariables.insert(factor->key<2>(), prevPose * measured);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else if(BearingRangeFactor<Pose, Point2>::shared_ptr factor =
|
||||
boost::dynamic_pointer_cast<BearingRangeFactor<Pose, Point2> >(measurementf))
|
||||
std::dynamic_pointer_cast<BearingRangeFactor<Pose, Point2> >(measurementf))
|
||||
{
|
||||
Key poseKey = factor->keys()[0], lmKey = factor->keys()[1];
|
||||
|
||||
|
@ -559,12 +557,12 @@ void runPerturb()
|
|||
|
||||
// Perturb values
|
||||
VectorValues noise;
|
||||
for(const Values::KeyValuePair key_val: initial)
|
||||
for(const auto& [key, dim]: initial.dims())
|
||||
{
|
||||
Vector noisev(key_val.value.dim());
|
||||
Vector noisev(dim);
|
||||
for(Vector::Index i = 0; i < noisev.size(); ++i)
|
||||
noisev(i) = normal(rng);
|
||||
noise.insert(key_val.key, noisev);
|
||||
noise.insert(key, noisev);
|
||||
}
|
||||
Values perturbed = initial.retract(noise);
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/geometry/Cal3_S2Stereo.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
#include <gtsam/nonlinear/utilities.h>
|
||||
#include <gtsam/nonlinear/NonlinearEquality.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
|
@ -113,7 +114,7 @@ int main(int argc, char** argv) {
|
|||
Values result = optimizer.optimize();
|
||||
|
||||
cout << "Final result sample:" << endl;
|
||||
Values pose_values = result.filter<Pose3>();
|
||||
Values pose_values = utilities::allPose3s(result);
|
||||
pose_values.print("Final camera poses:\n");
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -18,13 +18,11 @@
|
|||
#include <gtsam/global_includes.h>
|
||||
#include <gtsam/base/Matrix.h>
|
||||
|
||||
#include <boost/assign/list_of.hpp>
|
||||
#include <map>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
using boost::assign::list_of;
|
||||
|
||||
#ifdef GTSAM_USE_TBB
|
||||
|
||||
|
@ -81,7 +79,7 @@ map<int, double> testWithoutMemoryAllocation(int num_threads)
|
|||
// Now call it
|
||||
vector<double> results(numberOfProblems);
|
||||
|
||||
const vector<size_t> grainSizes = list_of(1)(10)(100)(1000);
|
||||
const vector<size_t> grainSizes = {1, 10, 100, 1000};
|
||||
map<int, double> timingResults;
|
||||
for(size_t grainSize: grainSizes)
|
||||
{
|
||||
|
@ -145,7 +143,7 @@ map<int, double> testWithMemoryAllocation(int num_threads)
|
|||
// Now call it
|
||||
vector<double> results(numberOfProblems);
|
||||
|
||||
const vector<size_t> grainSizes = list_of(1)(10)(100)(1000);
|
||||
const vector<size_t> grainSizes = {1, 10, 100, 1000};
|
||||
map<int, double> timingResults;
|
||||
for(size_t grainSize: grainSizes)
|
||||
{
|
||||
|
@ -172,7 +170,7 @@ int main(int argc, char* argv[])
|
|||
cout << "numberOfProblems = " << numberOfProblems << endl;
|
||||
cout << "problemSize = " << problemSize << endl;
|
||||
|
||||
const vector<int> numThreads = list_of(1)(4)(8);
|
||||
const vector<int> numThreads = {1, 4, 8};
|
||||
Results results;
|
||||
|
||||
for(size_t n: numThreads)
|
||||
|
|
|
@ -0,0 +1,160 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file TriangulationLOSTExample.cpp
|
||||
* @author Akshay Krishnan
|
||||
* @brief This example runs triangulation several times using 3 different
|
||||
* approaches: LOST, DLT, and DLT with optimization. It reports the covariance
|
||||
* and the runtime for each approach.
|
||||
*
|
||||
* @date 2022-07-10
|
||||
*/
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/geometry/Rot3.h>
|
||||
#include <gtsam/geometry/triangulation.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
#include <optional>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static std::mt19937 rng(42);
|
||||
|
||||
void PrintCovarianceStats(const Matrix& mat, const std::string& method) {
|
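// Treat each row of `mat` as one per-trial error vector: subtract the column-wise
// mean, form the (unbiased, 1/(N-1)) sample covariance, and report
// sqrt(trace(covariance)) as a single scalar summary of the spread.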
||||
Matrix centered = mat.rowwise() - mat.colwise().mean();
|
||||
Matrix cov = (centered.adjoint() * centered) / double(mat.rows() - 1);
|
||||
std::cout << method << " covariance: " << std::endl;
|
||||
std::cout << cov << std::endl;
|
||||
std::cout << "Trace sqrt: " << sqrt(cov.trace()) << std::endl << std::endl;
|
||||
}
|
||||
|
||||
void PrintDuration(const std::chrono::nanoseconds dur, double num_samples,
|
||||
const std::string& method) {
|
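// Reports the average time per trial; `dur` is accumulated in nanoseconds,
// so the printed value (nanoseconds * 1e-3) is in microseconds.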
||||
double nanoseconds = dur.count() / num_samples;
|
||||
std::cout << "Time taken by " << method << ": " << nanoseconds * 1e-3
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
void GetLargeCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
|
||||
std::vector<Pose3>* poses, Point3* point,
|
||||
Point2Vector* measurements) {
|
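// Builds a synthetic dataset of 500 identity-rotation cameras with identity
// intrinsics, placed uniformly at random in [-10, 10] x [-10, 10] x [-20, 0],
// all observing a single landmark at (0, 0, 10); the returned measurements
// are the exact, noise-free projections.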
||||
const double minXY = -10, maxXY = 10;
|
||||
const double minZ = -20, maxZ = 0;
|
||||
const int nrCameras = 500;
|
||||
cameras->reserve(nrCameras);
|
||||
poses->reserve(nrCameras);
|
||||
measurements->reserve(nrCameras);
|
||||
*point = Point3(0.0, 0.0, 10.0);
|
||||
|
||||
std::uniform_real_distribution<double> rand_xy(minXY, maxXY);
|
||||
std::uniform_real_distribution<double> rand_z(minZ, maxZ);
|
||||
Cal3_S2 identityK;
|
||||
|
||||
for (int i = 0; i < nrCameras; ++i) {
|
||||
Point3 wti(rand_xy(rng), rand_xy(rng), rand_z(rng));
|
||||
Pose3 wTi(Rot3(), wti);
|
||||
|
||||
poses->push_back(wTi);
|
||||
cameras->emplace_back(wTi, identityK);
|
||||
measurements->push_back(cameras->back().project(*point));
|
||||
}
|
||||
}
|
||||
|
||||
void GetSmallCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
|
||||
std::vector<Pose3>* poses, Point3* point,
|
||||
Point2Vector* measurements) {
|
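// A two-camera toy setup: one camera at the origin and one at (5, 0, -5), both
// with identity rotation and intrinsics, observing a single landmark at (0, 0, 1).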
||||
Pose3 pose1;
|
||||
Pose3 pose2(Rot3(), Point3(5., 0., -5.));
|
||||
Cal3_S2 identityK;
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, identityK);
|
||||
PinholeCamera<Cal3_S2> camera2(pose2, identityK);
|
||||
|
||||
*point = Point3(0, 0, 1);
|
||||
cameras->push_back(camera1);
|
||||
cameras->push_back(camera2);
|
||||
*poses = {pose1, pose2};
|
||||
*measurements = {camera1.project(*point), camera2.project(*point)};
|
||||
}
|
||||
|
||||
Point2Vector AddNoiseToMeasurements(const Point2Vector& measurements,
|
||||
const double measurementSigma) {
|
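// Perturbs every pixel measurement with independent zero-mean Gaussian noise of
// standard deviation measurementSigma, using the same generator `rng` as above.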
||||
std::normal_distribution<double> normal(0.0, measurementSigma);
|
||||
|
||||
Point2Vector noisyMeasurements;
|
||||
noisyMeasurements.reserve(measurements.size());
|
||||
for (const auto& p : measurements) {
|
||||
noisyMeasurements.emplace_back(p.x() + normal(rng), p.y() + normal(rng));
|
||||
}
|
||||
return noisyMeasurements;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main(int argc, char* argv[]) {
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
std::vector<Pose3> poses;
|
||||
Point3 landmark;
|
||||
Point2Vector measurements;
|
||||
GetLargeCamerasDataset(&cameras, &poses, &landmark, &measurements);
|
||||
// GetSmallCamerasDataset(&cameras, &poses, &landmark, &measurements);
|
||||
const double measurementSigma = 1e-2;
|
||||
SharedNoiseModel measurementNoise =
|
||||
noiseModel::Isotropic::Sigma(2, measurementSigma);
|
||||
|
||||
const long int nrTrials = 1000;
|
||||
Matrix errorsDLT = Matrix::Zero(nrTrials, 3);
|
||||
Matrix errorsLOST = Matrix::Zero(nrTrials, 3);
|
||||
Matrix errorsDLTOpt = Matrix::Zero(nrTrials, 3);
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
std::shared_ptr<Cal3_S2> calib = std::make_shared<Cal3_S2>();
|
||||
std::chrono::nanoseconds durationDLT;
|
||||
std::chrono::nanoseconds durationDLTOpt;
|
||||
std::chrono::nanoseconds durationLOST;
|
||||
|
||||
for (int i = 0; i < nrTrials; i++) {
|
||||
Point2Vector noisyMeasurements =
|
||||
AddNoiseToMeasurements(measurements, measurementSigma);
|
||||
|
||||
auto lostStart = std::chrono::high_resolution_clock::now();
|
||||
auto estimateLOST = triangulatePoint3<Cal3_S2>(
|
||||
cameras, noisyMeasurements, rank_tol, false, measurementNoise, true);
|
||||
durationLOST += std::chrono::high_resolution_clock::now() - lostStart;
|
||||
|
||||
auto dltStart = std::chrono::high_resolution_clock::now();
|
||||
auto estimateDLT = triangulatePoint3<Cal3_S2>(
|
||||
cameras, noisyMeasurements, rank_tol, false, measurementNoise, false);
|
||||
durationDLT += std::chrono::high_resolution_clock::now() - dltStart;
|
||||
|
||||
auto dltOptStart = std::chrono::high_resolution_clock::now();
|
||||
auto estimateDLTOpt = triangulatePoint3<Cal3_S2>(
|
||||
cameras, noisyMeasurements, rank_tol, true, measurementNoise, false);
|
||||
durationDLTOpt += std::chrono::high_resolution_clock::now() - dltOptStart;
|
||||
|
||||
errorsLOST.row(i) = estimateLOST - landmark;
|
||||
errorsDLT.row(i) = estimateDLT - landmark;
|
||||
errorsDLTOpt.row(i) = estimateDLTOpt - landmark;
|
||||
}
|
||||
PrintCovarianceStats(errorsLOST, "LOST");
|
||||
PrintCovarianceStats(errorsDLT, "DLT");
|
||||
PrintCovarianceStats(errorsDLTOpt, "DLT_OPT");
|
||||
|
||||
PrintDuration(durationLOST, nrTrials, "LOST");
|
||||
PrintDuration(durationDLT, nrTrials, "DLT");
|
||||
PrintDuration(durationDLTOpt, nrTrials, "DLT_OPT");
|
||||
}
|
|
@ -1,4 +1,3 @@
|
|||
syntax: glob
|
||||
qrc_*cxx
|
||||
*.orig
|
||||
*.pyc
|
||||
|
@ -13,7 +12,7 @@ core
|
|||
core.*
|
||||
*.bak
|
||||
*~
|
||||
build*
|
||||
*build*
|
||||
*.moc.*
|
||||
*.moc
|
||||
ui_*
|
||||
|
@ -28,7 +27,12 @@ activity.png
|
|||
*.rej
|
||||
log
|
||||
patch
|
||||
*.patch
|
||||
a
|
||||
a.*
|
||||
lapack/testing
|
||||
lapack/reference
|
||||
.*project
|
||||
.settings
|
||||
Makefile
|
||||
!ci/build.gitlab-ci.yml
|
|
@ -0,0 +1,23 @@
|
|||
# This file is part of Eigen, a lightweight C++ template library
|
||||
# for linear algebra.
|
||||
#
|
||||
# Copyright (C) 2020 Arm Ltd. and Contributors
|
||||
#
|
||||
# This Source Code Form is subject to the terms of the Mozilla
|
||||
# Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
stages:
|
||||
- buildsmoketests
|
||||
- smoketests
|
||||
- build
|
||||
- test
|
||||
|
||||
variables:
|
||||
BUILDDIR: builddir
|
||||
EIGEN_CI_CMAKE_GENEATOR: "Ninja"
|
||||
|
||||
include:
|
||||
- "/ci/smoketests.gitlab-ci.yml"
|
||||
- "/ci/build.gitlab-ci.yml"
|
||||
- "/ci/test.gitlab-ci.yml"
|
|
@ -0,0 +1,69 @@
|
|||
<!--
|
||||
Please read this!
|
||||
|
||||
Before opening a new issue, make sure to search for keywords in the issues
|
||||
filtered by "bug::confirmed" or "bug::unconfirmed" and "bugzilla" label:
|
||||
|
||||
- https://gitlab.com/libeigen/eigen/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=bug%3A%3Aconfirmed
|
||||
- https://gitlab.com/libeigen/eigen/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=bug%3A%3Aunconfirmed
|
||||
- https://gitlab.com/libeigen/eigen/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=bugzilla
|
||||
|
||||
and verify the issue you're about to submit isn't a duplicate. -->
|
||||
|
||||
### Summary
|
||||
<!-- Summarize the bug encountered concisely. -->
|
||||
|
||||
### Environment
|
||||
<!-- Please provide your development environment here -->
|
||||
- **Operating System** : Windows/Linux
|
||||
- **Architecture** : x64/Arm64/PowerPC ...
|
||||
- **Eigen Version** : 3.3.9
|
||||
- **Compiler Version** : Gcc7.0
|
||||
- **Compile Flags** : -O3 -march=native
|
||||
- **Vector Extension** : SSE/AVX/NEON ...
|
||||
|
||||
### Minimal Example
|
||||
<!-- If possible, please create a minimal example here that exhibits the problematic behavior.
|
||||
You can also link to [godbolt](https://godbolt.org). But please note that you need to click
|
||||
the "Share" button in the top right-hand corner of the godbolt page where you reproduce the sample
|
||||
code to get the share link instead of in your browser address bar.
|
||||
|
||||
You can read [the guidelines on stackoverflow](https://stackoverflow.com/help/minimal-reproducible-example)
|
||||
on how to create a good minimal example. -->
|
||||
|
||||
```cpp
|
||||
//show your code here
|
||||
```
|
||||
|
||||
### Steps to reproduce
|
||||
<!-- Describe how one can reproduce the issue - this is very important. Please use an ordered list. -->
|
||||
|
||||
1. first step
|
||||
2. second step
|
||||
3. ...
|
||||
|
||||
### What is the current *bug* behavior?
|
||||
<!-- Describe what actually happens. -->
|
||||
|
||||
### What is the expected *correct* behavior?
|
||||
<!-- Describe what you should see instead. -->
|
||||
|
||||
### Relevant logs
|
||||
<!-- Add relevant code snippets or program output within blocks marked by " ``` " -->
|
||||
|
||||
<!-- OPTIONAL: remove this section if you are not reporting a compilation warning issue.-->
|
||||
### Warning Messages
|
||||
<!-- Show us the warning messages you got! -->
|
||||
|
||||
<!-- OPTIONAL: remove this section if you are not reporting a performance issue. -->
|
||||
### Benchmark scripts and results
|
||||
<!-- Please share any benchmark scripts - either standalone, or using [Google Benchmark](https://github.com/google/benchmark). -->
|
||||
|
||||
### Anything else that might help
|
||||
<!-- It will be better to provide us more information to help narrow down the cause.
|
||||
Including but not limited to the following:
|
||||
- lines of code that might help us diagnose the problem.
|
||||
- potential ways to address the issue.
|
||||
- last known working/first broken version (release number or commit hash). -->
|
||||
|
||||
- [ ] Have a plan to fix this issue.
|
|
@ -0,0 +1,7 @@
|
|||
### Describe the feature you would like to be implemented.
|
||||
|
||||
### Would such a feature be useful for other users? Why?
|
||||
|
||||
### Any hints on how to implement the requested feature?
|
||||
|
||||
### Additional resources
|
gtsam/3rdparty/Eigen/.gitlab/merge_request_templates/Merge Request Template.md (vendored, new file, 26 lines)
|
@ -0,0 +1,26 @@
|
|||
<!--
|
||||
Thanks for contributing a merge request! Please name and fully describe your MR as you would for a commit message.
|
||||
If the MR fixes an issue, please include "Fixes #issue" in the commit message and the MR description.
|
||||
|
||||
In addition, we recommend that first-time contributors read our [contribution guidelines](https://eigen.tuxfamily.org/index.php?title=Contributing_to_Eigen) and [git page](https://eigen.tuxfamily.org/index.php?title=Git), which will help you submit a more standardized MR.
|
||||
|
||||
Before submitting the MR, you also need to complete the following checks:
|
||||
- Make one PR per feature/bugfix (don't mix multiple changes into one PR). Avoid committing unrelated changes.
|
||||
- Rebase before committing
|
||||
- For code changes, run the test suite (at least the tests that are likely affected by the change).
|
||||
See our [test guidelines](https://eigen.tuxfamily.org/index.php?title=Tests).
|
||||
- If possible, add a test (both for bug-fixes as well as new features)
|
||||
- Make sure new features are documented
|
||||
|
||||
Note that we are a team of volunteers; we appreciate your patience during the review process.
|
||||
|
||||
Again, thanks for contributing! -->
|
||||
|
||||
### Reference issue
|
||||
<!-- You can link to a specific issue using the gitlab syntax #<issue number> -->
|
||||
|
||||
### What does this implement/fix?
|
||||
<!--Please explain your changes.-->
|
||||
|
||||
### Additional information
|
||||
<!--Any additional information you think is important.-->
|
|
@ -1,33 +0,0 @@
|
|||
2db9468678c6480c9633b6272ff0e3599d1e11a3 2.0-beta3
|
||||
375224817dce669b6fa31d920d4c895a63fabf32 2.0-beta1
|
||||
3b8120f077865e2a072e10f5be33e1d942b83a06 2.0-rc1
|
||||
19dfc0e7666bcee26f7a49eb42f39a0280a3485e 2.0-beta5
|
||||
7a7d8a9526f003ffa2430dfb0c2c535b5add3023 2.0-beta4
|
||||
7d14ad088ac23769c349518762704f0257f6a39b 2.0.1
|
||||
b9d48561579fd7d4c05b2aa42235dc9de6484bf2 2.0-beta6
|
||||
e17630a40408243cb1a51ad0fe3a99beb75b7450 before-hg-migration
|
||||
eda654d4cda2210ce80719addcf854773e6dec5a 2.0.0
|
||||
ee9a7c468a9e73fab12f38f02bac24b07f29ed71 2.0-beta2
|
||||
d49097c25d8049e730c254a2fed725a240ce4858 after-hg-migration
|
||||
655348878731bcb5d9bbe0854077b052e75e5237 actual-start-from-scratch
|
||||
12a658962d4e6dfdc9a1c350fe7b69e36e70675c 3.0-beta1
|
||||
5c4180ad827b3f869b13b1d82f5a6ce617d6fcee 3.0-beta2
|
||||
7ae24ca6f3891d5ac58ddc7db60ad413c8d6ec35 3.0-beta3
|
||||
c40708b9088d622567fecc9208ad4a426621d364 3.0-beta4
|
||||
b6456624eae74f49ae8683d8e7b2882a2ca0342a 3.0-rc1
|
||||
a810d5dbab47acfe65b3350236efdd98f67d4d8a 3.1.0-alpha1
|
||||
304c88ca3affc16dd0b008b1104873986edd77af 3.1.0-alpha2
|
||||
920fc730b5930daae0a6dbe296d60ce2e3808215 3.1.0-beta1
|
||||
8383e883ebcc6f14695ff0b5e20bb631abab43fb 3.1.0-rc1
|
||||
bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2
|
||||
da195914abcc1d739027cbee7c52077aab30b336 3.2-beta1
|
||||
a8e0d153fc5e239ef8b06e3665f1f9e8cb8d49c8 before-evaluators
|
||||
09a8e21866106b49c5dec1d6d543e5794e82efa0 3.3-alpha1
|
||||
ce5a455b34c0a0ac3545a1497cb4a16c38ed90e8 3.3-beta1
|
||||
69d418c0699907bcd0bf9e0b3ba0a112ed091d85 3.3-beta2
|
||||
bef509908b9da05d0d07ffc0da105e2c8c6d3996 3.3-rc1
|
||||
04ab5fa4b241754afcf631117572276444c67239 3.3-rc2
|
||||
26667be4f70baf4f0d39e96f330714c87b399090 3.3.0
|
||||
f562a193118d4f40514e2f4a0ace6e974926ef06 3.3.1
|
||||
da9b4e14c2550e0d11078a3c39e6d56eba9905df 3.3.2
|
||||
67e894c6cd8f5f1f604b27d37ed47fdf012674ff 3.3.3
|
|
@ -1,6 +1,7 @@
|
|||
project(Eigen3)
|
||||
# cmake_minimum_required must be the first command of the file
|
||||
cmake_minimum_required(VERSION 3.5.0)
|
||||
|
||||
cmake_minimum_required(VERSION 2.8.5)
|
||||
project(Eigen3)
|
||||
|
||||
# guard against in-source builds
|
||||
|
||||
|
@ -8,6 +9,7 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
|
|||
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
|
||||
endif()
|
||||
|
||||
|
||||
# Alias Eigen_*_DIR to Eigen3_*_DIR:
|
||||
|
||||
set(Eigen_SOURCE_DIR ${Eigen3_SOURCE_DIR})
|
||||
|
@ -19,16 +21,9 @@ if (NOT CMAKE_BUILD_TYPE)
|
|||
set(CMAKE_BUILD_TYPE "Release")
|
||||
endif()
|
||||
|
||||
string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_tolower)
|
||||
if( NOT cmake_build_type_tolower STREQUAL "debug"
|
||||
AND NOT cmake_build_type_tolower STREQUAL "release"
|
||||
AND NOT cmake_build_type_tolower STREQUAL "relwithdebinfo")
|
||||
message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are Debug, Release, RelWithDebInfo (case-insensitive).")
|
||||
endif()
|
||||
|
||||
|
||||
#############################################################################
|
||||
# retrieve version infomation #
|
||||
# retrieve version information #
|
||||
#############################################################################
|
||||
|
||||
# automatically parse the version number
|
||||
|
@ -41,29 +36,28 @@ string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen_minor_
|
|||
set(EIGEN_MINOR_VERSION "${CMAKE_MATCH_1}")
|
||||
set(EIGEN_VERSION_NUMBER ${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION})
|
||||
|
||||
# if we are not in a mercurial clone
|
||||
if(IS_DIRECTORY ${CMAKE_SOURCE_DIR}/.hg)
|
||||
# if the mercurial program is absent or this will leave the EIGEN_HG_CHANGESET string empty,
|
||||
# if we are in a git clone
|
||||
if(IS_DIRECTORY ${CMAKE_SOURCE_DIR}/.git)
|
||||
# if the git program is absent, this will leave the EIGEN_GIT_REVNUM string empty,
|
||||
# but won't stop CMake.
|
||||
execute_process(COMMAND hg tip -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_HGTIP_OUTPUT)
|
||||
execute_process(COMMAND hg branch -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_BRANCH_OUTPUT)
|
||||
execute_process(COMMAND git ls-remote --refs -q ${CMAKE_SOURCE_DIR} HEAD OUTPUT_VARIABLE EIGEN_GIT_OUTPUT)
|
||||
endif()
|
||||
|
||||
# if this is the default (aka development) branch, extract the mercurial changeset number from the hg tip output...
|
||||
if(EIGEN_BRANCH_OUTPUT MATCHES "default")
|
||||
string(REGEX MATCH "^changeset: *[0-9]*:([0-9;a-f]+).*" EIGEN_HG_CHANGESET_MATCH "${EIGEN_HGTIP_OUTPUT}")
|
||||
set(EIGEN_HG_CHANGESET "${CMAKE_MATCH_1}")
|
||||
endif(EIGEN_BRANCH_OUTPUT MATCHES "default")
|
||||
# extract the git rev number from the git output...
|
||||
if(EIGEN_GIT_OUTPUT)
|
||||
string(REGEX MATCH "^([0-9;a-f]+).*" EIGEN_GIT_CHANGESET_MATCH "${EIGEN_GIT_OUTPUT}")
|
||||
set(EIGEN_GIT_REVNUM "${CMAKE_MATCH_1}")
|
||||
endif()
|
||||
#...and show it next to the version number
|
||||
if(EIGEN_HG_CHANGESET)
|
||||
set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER} (mercurial changeset ${EIGEN_HG_CHANGESET})")
|
||||
else(EIGEN_HG_CHANGESET)
|
||||
if(EIGEN_GIT_REVNUM)
|
||||
set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER} (git rev ${EIGEN_GIT_REVNUM})")
|
||||
else()
|
||||
set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER}")
|
||||
endif(EIGEN_HG_CHANGESET)
|
||||
|
||||
endif()
|
||||
|
||||
include(CheckCXXCompilerFlag)
|
||||
include(GNUInstallDirs)
|
||||
include(CMakeDependentOption)
|
||||
|
||||
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
|
||||
|
||||
|
@ -78,7 +72,7 @@ macro(ei_add_cxx_compiler_flag FLAG)
|
|||
if(COMPILER_SUPPORT_${SFLAG})
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
|
||||
endif()
|
||||
endmacro(ei_add_cxx_compiler_flag)
|
||||
endmacro()
|
||||
|
||||
check_cxx_compiler_flag("-std=c++11" EIGEN_COMPILER_SUPPORT_CPP11)
|
||||
|
||||
|
@ -94,6 +88,9 @@ else()
|
|||
ei_add_cxx_compiler_flag("-std=c++03")
|
||||
endif()
|
||||
|
||||
# Determine if we should build shared libraries on this platform.
|
||||
get_cmake_property(EIGEN_BUILD_SHARED_LIBS TARGET_SUPPORTS_SHARED_LIBS)
|
||||
|
||||
#############################################################################
|
||||
# find how to link to the standard libraries #
|
||||
#############################################################################
|
||||
|
@ -134,7 +131,7 @@ if(NOT WIN32 OR NOT CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
|
|||
option(EIGEN_BUILD_PKGCONFIG "Build pkg-config .pc file for Eigen" ON)
|
||||
endif()
|
||||
|
||||
set(CMAKE_INCLUDE_CURRENT_DIR ON)
|
||||
set(CMAKE_INCLUDE_CURRENT_DIR OFF)
|
||||
|
||||
option(EIGEN_SPLIT_LARGE_TESTS "Split large tests into smaller executables" ON)
|
||||
|
||||
|
@ -158,7 +155,7 @@ if(NOT MSVC)
|
|||
ei_add_cxx_compiler_flag("-Wall")
|
||||
ei_add_cxx_compiler_flag("-Wextra")
|
||||
#ei_add_cxx_compiler_flag("-Weverything") # clang
|
||||
|
||||
|
||||
ei_add_cxx_compiler_flag("-Wundef")
|
||||
ei_add_cxx_compiler_flag("-Wcast-align")
|
||||
ei_add_cxx_compiler_flag("-Wchar-subscripts")
|
||||
|
@ -173,29 +170,25 @@ if(NOT MSVC)
|
|||
ei_add_cxx_compiler_flag("-Wc++11-extensions")
|
||||
ei_add_cxx_compiler_flag("-Wdouble-promotion")
|
||||
# ei_add_cxx_compiler_flag("-Wconversion")
|
||||
|
||||
# -Wshadow is insanely too strict with gcc, hopefully it will become usable with gcc 6
|
||||
# if(NOT CMAKE_COMPILER_IS_GNUCXX OR (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "5.0.0"))
|
||||
if(NOT CMAKE_COMPILER_IS_GNUCXX)
|
||||
ei_add_cxx_compiler_flag("-Wshadow")
|
||||
endif()
|
||||
|
||||
|
||||
ei_add_cxx_compiler_flag("-Wshadow")
|
||||
|
||||
ei_add_cxx_compiler_flag("-Wno-psabi")
|
||||
ei_add_cxx_compiler_flag("-Wno-variadic-macros")
|
||||
ei_add_cxx_compiler_flag("-Wno-long-long")
|
||||
|
||||
|
||||
ei_add_cxx_compiler_flag("-fno-check-new")
|
||||
ei_add_cxx_compiler_flag("-fno-common")
|
||||
ei_add_cxx_compiler_flag("-fstrict-aliasing")
|
||||
ei_add_cxx_compiler_flag("-wd981") # disable ICC's "operands are evaluated in unspecified order" remark
|
||||
ei_add_cxx_compiler_flag("-wd2304") # disable ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor
|
||||
|
||||
|
||||
|
||||
|
||||
# The -ansi flag must be added last, otherwise it is also used as a linker flag by check_cxx_compiler_flag, making it fail
|
||||
# Moreover we should not set both -strict-ansi and -ansi
|
||||
check_cxx_compiler_flag("-strict-ansi" COMPILER_SUPPORT_STRICTANSI)
|
||||
ei_add_cxx_compiler_flag("-Qunused-arguments") # disable clang warning: argument unused during compilation: '-ansi'
|
||||
|
||||
|
||||
if(COMPILER_SUPPORT_STRICTANSI)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -strict-ansi")
|
||||
else()
|
||||
|
@ -206,7 +199,7 @@ if(NOT MSVC)
|
|||
ei_add_cxx_compiler_flag("-pie")
|
||||
ei_add_cxx_compiler_flag("-fPIE")
|
||||
endif()
|
||||
|
||||
|
||||
set(CMAKE_REQUIRED_FLAGS "")
|
||||
|
||||
option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF)
|
||||
|
@ -251,12 +244,30 @@ if(NOT MSVC)
|
|||
message(STATUS "Enabling FMA in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_AVX2 "Enable/Disable AVX2 in tests/examples" OFF)
|
||||
if(EIGEN_TEST_AVX2)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2 -mfma")
|
||||
message(STATUS "Enabling AVX2 in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_AVX512 "Enable/Disable AVX512 in tests/examples" OFF)
|
||||
if(EIGEN_TEST_AVX512)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -fabi-version=6 -DEIGEN_ENABLE_AVX512")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mfma")
|
||||
if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fabi-version=6")
|
||||
endif()
|
||||
message(STATUS "Enabling AVX512 in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_AVX512DQ "Enable/Disable AVX512DQ in tests/examples" OFF)
|
||||
if(EIGEN_TEST_AVX512DQ)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512dq")
|
||||
if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fabi-version=6")
|
||||
endif()
|
||||
message(STATUS "Enabling AVX512DQ in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_F16C "Enable/Disable F16C in tests/examples" OFF)
|
||||
if(EIGEN_TEST_F16C)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mf16c")
|
||||
|
@ -275,6 +286,12 @@ if(NOT MSVC)
|
|||
message(STATUS "Enabling VSX in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_MSA "Enable/Disable MSA in tests/examples" OFF)
|
||||
if(EIGEN_TEST_MSA)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mmsa")
|
||||
message(STATUS "Enabling MSA in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF)
|
||||
if(EIGEN_TEST_NEON)
|
||||
if(EIGEN_TEST_FMA)
|
||||
|
@ -292,12 +309,18 @@ if(NOT MSVC)
|
|||
message(STATUS "Enabling NEON in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_ZVECTOR "Enable/Disable S390X(zEC13) ZVECTOR in tests/examples" OFF)
|
||||
if(EIGEN_TEST_ZVECTOR)
|
||||
option(EIGEN_TEST_Z13 "Enable/Disable S390X(zEC13) ZVECTOR in tests/examples" OFF)
|
||||
if(EIGEN_TEST_Z13)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=z13 -mzvector")
|
||||
message(STATUS "Enabling S390X(zEC13) ZVECTOR in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_Z14 "Enable/Disable S390X(zEC14) ZVECTOR in tests/examples" OFF)
|
||||
if(EIGEN_TEST_Z14)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=z14 -mzvector")
|
||||
message(STATUS "Enabling S390X(zEC13) ZVECTOR in tests/examples")
|
||||
endif()
|
||||
|
||||
check_cxx_compiler_flag("-fopenmp" COMPILER_SUPPORT_OPENMP)
|
||||
if(COMPILER_SUPPORT_OPENMP)
|
||||
option(EIGEN_TEST_OPENMP "Enable/Disable OpenMP in tests/examples" OFF)
|
||||
|
@ -307,7 +330,7 @@ if(NOT MSVC)
|
|||
endif()
|
||||
endif()
|
||||
|
||||
else(NOT MSVC)
|
||||
else()
|
||||
|
||||
# C4127 - conditional expression is constant
|
||||
# C4714 - marked as __forceinline not inlined (I failed to deactivate it selectively)
|
||||
|
@ -315,7 +338,7 @@ else(NOT MSVC)
|
|||
# because we are oftentimes returning objects that have a destructor or may
|
||||
# throw exceptions - in particular in the unit tests we are throwing extra many
|
||||
# exceptions to cover indexing errors.
|
||||
# C4505 - unreferenced local function has been removed (impossible to deactive selectively)
|
||||
# C4505 - unreferenced local function has been removed (impossible to deactivate selectively)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /wd4127 /wd4505 /wd4714")
|
||||
|
||||
# replace all /Wx by /W4
|
||||
|
@ -335,10 +358,23 @@ else(NOT MSVC)
|
|||
if(NOT CMAKE_CL_64)
|
||||
# arch is not supported on 64 bit systems, SSE is enabled automatically.
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2")
|
||||
endif(NOT CMAKE_CL_64)
|
||||
endif()
|
||||
message(STATUS "Enabling SSE2 in tests/examples")
|
||||
endif(EIGEN_TEST_SSE2)
|
||||
endif(NOT MSVC)
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_AVX "Enable/Disable AVX in tests/examples" OFF)
|
||||
if(EIGEN_TEST_AVX)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
|
||||
message(STATUS "Enabling AVX in tests/examples")
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_FMA "Enable/Disable FMA/AVX2 in tests/examples" OFF)
|
||||
if(EIGEN_TEST_FMA AND NOT EIGEN_TEST_NEON)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
|
||||
message(STATUS "Enabling FMA/AVX2 in tests/examples")
|
||||
endif()
|
||||
|
||||
endif()
|
||||
|
||||
option(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION "Disable explicit vectorization in tests/examples" OFF)
|
||||
option(EIGEN_TEST_X87 "Force using X87 instructions. Implies no vectorization." OFF)

@ -382,7 +418,7 @@ endif()

set(EIGEN_CUDA_COMPUTE_ARCH 30 CACHE STRING "The CUDA compute architecture level to target when compiling CUDA code")

include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
include_directories(${CMAKE_CURRENT_SOURCE_DIR})

# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR
if(EIGEN_INCLUDE_INSTALL_DIR)

@ -391,22 +427,28 @@ endif()

if(EIGEN_INCLUDE_INSTALL_DIR AND NOT INCLUDE_INSTALL_DIR)
set(INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR}
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed")
CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where Eigen header files are installed")
else()
set(INCLUDE_INSTALL_DIR
"${CMAKE_INSTALL_INCLUDEDIR}/eigen3"
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed"
CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where Eigen header files are installed"
)
endif()
set(CMAKEPACKAGE_INSTALL_DIR
"${CMAKE_INSTALL_DATADIR}/eigen3/cmake"
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen3Config.cmake is installed"
CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where Eigen3Config.cmake is installed"
)
set(PKGCONFIG_INSTALL_DIR
"${CMAKE_INSTALL_DATADIR}/pkgconfig"
CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where eigen3.pc is installed"
CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where eigen3.pc is installed"
)

foreach(var INCLUDE_INSTALL_DIR CMAKEPACKAGE_INSTALL_DIR PKGCONFIG_INSTALL_DIR)
# If an absolute path is specified, make it relative to "{CMAKE_INSTALL_PREFIX}".
if(IS_ABSOLUTE "${${var}}")
file(RELATIVE_PATH "${var}" "${CMAKE_INSTALL_PREFIX}" "${${var}}")
endif()
endforeach()
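For clarity, here is a standalone sketch of the rewrite performed by the loop above, using assumed example values that are not part of this diff:

# Assumed values, for illustration only
set(CMAKE_INSTALL_PREFIX "/usr/local")
set(INCLUDE_INSTALL_DIR "/usr/local/include/eigen3")
if(IS_ABSOLUTE "${INCLUDE_INSTALL_DIR}")
  # file(RELATIVE_PATH <out-var> <base> <path>) stores the relative result in <out-var>
  file(RELATIVE_PATH INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}" "${INCLUDE_INSTALL_DIR}")
endif()
message(STATUS "${INCLUDE_INSTALL_DIR}")   # prints: include/eigen3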

# similar to set_target_properties but append the property instead of overwriting it
macro(ei_add_target_property target prop value)

@ -415,9 +457,9 @@ macro(ei_add_target_property target prop value)
# if the property wasn't previously set, ${previous} is now "previous-NOTFOUND" which cmake allows catching with plain if()
if(NOT previous)
set(previous "")
endif(NOT previous)
endif()
set_target_properties(${target} PROPERTIES ${prop} "${previous} ${value}")
endmacro(ei_add_target_property)
endmacro()
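A minimal usage sketch of this helper; the target and flag below are hypothetical and only illustrate the append-instead-of-overwrite behaviour:

# Hypothetical target, for illustration only
add_library(demo_lib STATIC demo.cpp)
set_target_properties(demo_lib PROPERTIES COMPILE_FLAGS "-Wall")
# Appends rather than overwrites: COMPILE_FLAGS becomes "-Wall -DDEMO_EXTRA"
ei_add_target_property(demo_lib COMPILE_FLAGS "-DDEMO_EXTRA")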

install(FILES
signature_of_eigen3_matrix_library

@ -431,9 +473,14 @@ if(EIGEN_BUILD_PKGCONFIG)
)
endif()

add_subdirectory(Eigen)
install(DIRECTORY Eigen DESTINATION ${INCLUDE_INSTALL_DIR} COMPONENT Devel)

option(EIGEN_BUILD_DOC "Enable creation of Eigen documentation" ON)
if(EIGEN_BUILD_DOC)
add_subdirectory(doc EXCLUDE_FROM_ALL)
endif()

add_subdirectory(doc EXCLUDE_FROM_ALL)

option(BUILD_TESTING "Enable creation of Eigen tests." ON)
if(BUILD_TESTING)

@ -444,6 +491,8 @@ if(BUILD_TESTING)
else()
add_subdirectory(test EXCLUDE_FROM_ALL)
endif()

add_subdirectory(failtest)
endif()

if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)

@ -456,9 +505,32 @@ endif()

# add SYCL
option(EIGEN_TEST_SYCL "Add Sycl support." OFF)
option(EIGEN_SYCL_TRISYCL "Use the triSYCL Sycl implementation (ComputeCPP by default)." OFF)
if(EIGEN_TEST_SYCL)
set (CMAKE_MODULE_PATH "${CMAKE_ROOT}/Modules" "cmake/Modules/" "${CMAKE_MODULE_PATH}")
include(FindComputeCpp)
find_package(Threads REQUIRED)
if(EIGEN_SYCL_TRISYCL)
message(STATUS "Using triSYCL")
include(FindTriSYCL)
else()
message(STATUS "Using ComputeCPP SYCL")
include(FindComputeCpp)
set(COMPUTECPP_DRIVER_DEFAULT_VALUE OFF)
if (NOT MSVC)
set(COMPUTECPP_DRIVER_DEFAULT_VALUE ON)
endif()
option(COMPUTECPP_USE_COMPILER_DRIVER
"Use ComputeCpp driver instead of a 2 steps compilation"
${COMPUTECPP_DRIVER_DEFAULT_VALUE}
)
endif(EIGEN_SYCL_TRISYCL)
option(EIGEN_DONT_VECTORIZE_SYCL "Don't use vectorisation in the SYCL tests." OFF)
if(EIGEN_DONT_VECTORIZE_SYCL)
message(STATUS "Disabling SYCL vectorization in tests/examples")
# When disabling SYCL vectorization, also disable Eigen default vectorization
add_definitions(-DEIGEN_DONT_VECTORIZE=1)
add_definitions(-DEIGEN_DONT_VECTORIZE_SYCL=1)
endif()
endif()

add_subdirectory(unsupported)

@ -471,11 +543,11 @@ add_subdirectory(scripts EXCLUDE_FROM_ALL)
# TODO: consider also replacing EIGEN_BUILD_BTL by a custom target "make btl"?
if(EIGEN_BUILD_BTL)
add_subdirectory(bench/btl EXCLUDE_FROM_ALL)
endif(EIGEN_BUILD_BTL)
endif()

if(NOT WIN32)
add_subdirectory(bench/spbench EXCLUDE_FROM_ALL)
endif(NOT WIN32)
endif()

configure_file(scripts/cdashtesting.cmake.in cdashtesting.cmake @ONLY)

@ -487,37 +559,32 @@ message(STATUS "")
message(STATUS "Configured Eigen ${EIGEN_VERSION_NUMBER}")
message(STATUS "")

option(EIGEN_FAILTEST "Enable failtests." OFF)
if(EIGEN_FAILTEST)
add_subdirectory(failtest)
endif()

string(TOLOWER "${CMAKE_GENERATOR}" cmake_generator_tolower)
if(cmake_generator_tolower MATCHES "makefile")
message(STATUS "Some things you can do now:")
message(STATUS "--------------+--------------------------------------------------------------")
message(STATUS "Command | Description")
message(STATUS "--------------+--------------------------------------------------------------")
message(STATUS "make install | Install Eigen. Headers will be installed to:")
message(STATUS " | <CMAKE_INSTALL_PREFIX>/<INCLUDE_INSTALL_DIR>")
message(STATUS " | Using the following values:")
message(STATUS " | CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
message(STATUS " | INCLUDE_INSTALL_DIR: ${INCLUDE_INSTALL_DIR}")
message(STATUS " | Change the install location of Eigen headers using:")
message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourprefix")
message(STATUS " | Or:")
message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir")
message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX")
message(STATUS "make check | Build and run the unit-tests. Read this page:")
message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests")
message(STATUS "make blas | Build BLAS library (not the same thing as Eigen)")
message(STATUS "make uninstall| Removes files installed by make install")
message(STATUS "--------------+--------------------------------------------------------------")
message(STATUS "Available targets (use: make TARGET):")
else()
message(STATUS "To build/run the unit tests, read this page:")
message(STATUS " http://eigen.tuxfamily.org/index.php?title=Tests")
message(STATUS "Available targets (use: cmake --build . --target TARGET):")
endif()

message(STATUS "---------+--------------------------------------------------------------")
message(STATUS "Target | Description")
message(STATUS "---------+--------------------------------------------------------------")
message(STATUS "install | Install Eigen. Headers will be installed to:")
message(STATUS " | <CMAKE_INSTALL_PREFIX>/<INCLUDE_INSTALL_DIR>")
message(STATUS " | Using the following values:")
message(STATUS " | CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
message(STATUS " | INCLUDE_INSTALL_DIR: ${INCLUDE_INSTALL_DIR}")
message(STATUS " | Change the install location of Eigen headers using:")
message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourprefix")
message(STATUS " | Or:")
message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir")
message(STATUS "doc | Generate the API documentation, requires Doxygen & LaTeX")
if(BUILD_TESTING)
message(STATUS "check | Build and run the unit-tests. Read this page:")
message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests")
endif()
message(STATUS "blas | Build BLAS library (not the same thing as Eigen)")
message(STATUS "uninstall| Remove files installed by the install target")
message(STATUS "---------+--------------------------------------------------------------")
message(STATUS "")

@ -529,82 +596,48 @@ set ( EIGEN_DEFINITIONS "")
set ( EIGEN_INCLUDE_DIR "${CMAKE_INSTALL_PREFIX}/${INCLUDE_INSTALL_DIR}" )
set ( EIGEN_ROOT_DIR ${CMAKE_INSTALL_PREFIX} )

# Interface libraries require at least CMake 3.0
if (NOT CMAKE_VERSION VERSION_LESS 3.0)
include (CMakePackageConfigHelpers)
include (CMakePackageConfigHelpers)

# Imported target support
add_library (eigen INTERFACE)
# Imported target support
add_library (eigen INTERFACE)
add_library (Eigen3::Eigen ALIAS eigen)
target_compile_definitions (eigen INTERFACE ${EIGEN_DEFINITIONS})
target_include_directories (eigen INTERFACE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
$<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}>
)

target_compile_definitions (eigen INTERFACE ${EIGEN_DEFINITIONS})
target_include_directories (eigen INTERFACE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
$<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}>
)
# Export as title case Eigen
set_target_properties (eigen PROPERTIES EXPORT_NAME Eigen)

# Export as title case Eigen
set_target_properties (eigen PROPERTIES EXPORT_NAME Eigen)
install (TARGETS eigen EXPORT Eigen3Targets)

install (TARGETS eigen EXPORT Eigen3Targets)
configure_package_config_file (
${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3Config.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
PATH_VARS EIGEN_INCLUDE_DIR EIGEN_ROOT_DIR
INSTALL_DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}
NO_CHECK_REQUIRED_COMPONENTS_MACRO # Eigen does not provide components
)
# Remove CMAKE_SIZEOF_VOID_P from Eigen3ConfigVersion.cmake since Eigen does
# not depend on architecture specific settings or libraries. More
# specifically, an Eigen3Config.cmake generated from a 64 bit target can be
# used for 32 bit targets as well (and vice versa).
set (_Eigen3_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})
unset (CMAKE_SIZEOF_VOID_P)
write_basic_package_version_file (Eigen3ConfigVersion.cmake
VERSION ${EIGEN_VERSION_NUMBER}
COMPATIBILITY SameMajorVersion)
set (CMAKE_SIZEOF_VOID_P ${_Eigen3_CMAKE_SIZEOF_VOID_P})

configure_package_config_file (
${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3Config.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
PATH_VARS EIGEN_INCLUDE_DIR EIGEN_ROOT_DIR
INSTALL_DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}
NO_CHECK_REQUIRED_COMPONENTS_MACRO # Eigen does not provide components
)
# Remove CMAKE_SIZEOF_VOID_P from Eigen3ConfigVersion.cmake since Eigen does
# not depend on architecture specific settings or libraries. More
# specifically, an Eigen3Config.cmake generated from a 64 bit target can be
# used for 32 bit targets as well (and vice versa).
set (_Eigen3_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})
unset (CMAKE_SIZEOF_VOID_P)
write_basic_package_version_file (Eigen3ConfigVersion.cmake
VERSION ${EIGEN_VERSION_NUMBER}
COMPATIBILITY SameMajorVersion)
set (CMAKE_SIZEOF_VOID_P ${_Eigen3_CMAKE_SIZEOF_VOID_P})
# The Eigen target will be located in the Eigen3 namespace. Other CMake
# targets can refer to it using Eigen3::Eigen.
export (TARGETS eigen NAMESPACE Eigen3:: FILE Eigen3Targets.cmake)
# Export Eigen3 package to CMake registry such that it can be easily found by
# CMake even if it has not been installed to a standard directory.
export (PACKAGE Eigen3)

# The Eigen target will be located in the Eigen3 namespace. Other CMake
# targets can refer to it using Eigen3::Eigen.
export (TARGETS eigen NAMESPACE Eigen3:: FILE Eigen3Targets.cmake)
# Export Eigen3 package to CMake registry such that it can be easily found by
# CMake even if it has not been installed to a standard directory.
export (PACKAGE Eigen3)

install (EXPORT Eigen3Targets NAMESPACE Eigen3:: DESTINATION ${CMAKEPACKAGE_INSTALL_DIR})

else (NOT CMAKE_VERSION VERSION_LESS 3.0)
# Fallback to legacy Eigen3Config.cmake without the imported target

# If CMakePackageConfigHelpers module is available (CMake >= 2.8.8)
# create a relocatable Config file, otherwise leave the hardcoded paths
include(CMakePackageConfigHelpers OPTIONAL RESULT_VARIABLE CPCH_PATH)

if(CPCH_PATH)
configure_package_config_file (
${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3ConfigLegacy.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
PATH_VARS EIGEN_INCLUDE_DIR EIGEN_ROOT_DIR
INSTALL_DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}
NO_CHECK_REQUIRED_COMPONENTS_MACRO # Eigen does not provide components
)
else()
# The PACKAGE_* variables are defined by the configure_package_config_file
# but without it we define them manually to the hardcoded paths
set(PACKAGE_INIT "")
set(PACKAGE_EIGEN_INCLUDE_DIR ${EIGEN_INCLUDE_DIR})
set(PACKAGE_EIGEN_ROOT_DIR ${EIGEN_ROOT_DIR})
configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3ConfigLegacy.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
@ONLY ESCAPE_QUOTES )
endif()

write_basic_package_version_file( Eigen3ConfigVersion.cmake
VERSION ${EIGEN_VERSION_NUMBER}
COMPATIBILITY SameMajorVersion )

endif (NOT CMAKE_VERSION VERSION_LESS 3.0)
install (EXPORT Eigen3Targets NAMESPACE Eigen3:: DESTINATION ${CMAKEPACKAGE_INSTALL_DIR})

install ( FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/UseEigen3.cmake
${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake

@ -614,3 +647,7 @@ install ( FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/UseEigen3.cmake
# Add uninstall target
add_custom_target ( uninstall
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/EigenUninstall.cmake)

if (EIGEN_SPLIT_TESTSUITE)
ei_split_testsuite("${EIGEN_SPLIT_TESTSUITE}")
endif()
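The exported Eigen3Config.cmake and the Eigen3::Eigen interface target above are what downstream projects consume. A minimal consumer sketch, assuming Eigen has been installed with the configuration shown; the project and target names are illustrative only:

cmake_minimum_required(VERSION 3.5)
project(eigen_consumer CXX)                    # hypothetical downstream project
find_package(Eigen3 REQUIRED NO_MODULE)        # locates the installed Eigen3Config.cmake
add_executable(demo main.cpp)
target_link_libraries(demo Eigen3::Eigen)      # header-only: propagates include dirs and definitions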

@ -0,0 +1,203 @@
/*
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

@ -23,4 +23,4 @@
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
*/

@ -1,52 +1,51 @@
Minpack Copyright Notice (1999) University of Chicago. All rights reserved

Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the
following conditions are met:

1. Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.

2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.

3. The end-user documentation included with the
redistribution, if any, must include the following
acknowledgment:

"This product includes software developed by the
University of Chicago, as Operator of Argonne National
Laboratory.

Alternately, this acknowledgment may appear in the software
itself, if and wherever such third-party acknowledgments
normally appear.

4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
BE CORRECTED.

5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
(INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
POSSIBILITY OF SUCH LOSS OR DAMAGES.

Minpack Copyright Notice (1999) University of Chicago. All rights reserved

Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the
following conditions are met:

1. Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.

2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.

3. The end-user documentation included with the
redistribution, if any, must include the following
acknowledgment:

"This product includes software developed by the
University of Chicago, as Operator of Argonne National
Laboratory.

Alternately, this acknowledgment may appear in the software
itself, if and wherever such third-party acknowledgments
normally appear.

4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
BE CORRECTED.

5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
(INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
POSSIBILITY OF SUCH LOSS OR DAMAGES.

@ -2,12 +2,16 @@
## Then modify the CMakeLists.txt file in the root directory of your
## project to incorporate the testing dashboard.
## # The following are required to uses Dart and the Cdash dashboard
## ENABLE_TESTING()
## INCLUDE(CTest)
set(CTEST_PROJECT_NAME "Eigen 3.3")
## enable_testing()
## include(CTest)
set(CTEST_PROJECT_NAME "Eigen")
set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")

set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "manao.inria.fr")
set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen+3.3")
set(CTEST_DROP_SITE "my.cdash.org")
set(CTEST_DROP_LOCATION "/submit.php?project=Eigen")
set(CTEST_DROP_SITE_CDASH TRUE)
#set(CTEST_PROJECT_SUBPROJECTS
#Official
#Unsupported
#)
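As the comments at the top of this file note, these dashboard settings only take effect once the project's root CMakeLists.txt opts into CTest; a minimal sketch of that wiring (illustrative, not part of this diff):

# In the root CMakeLists.txt of the project that ships this CTestConfig.cmake
enable_testing()
include(CTest)   # reads CTestConfig.cmake and adds the Experimental/Nightly/Continuous targets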