Merge branch 'develop' into add-citation

commit 84b0498b7f

@@ -9,10 +9,10 @@ tar -zxf ${BOOST_FOLDER}.tar.gz

# Bootstrap
cd ${BOOST_FOLDER}/
./bootstrap.sh
./bootstrap.sh --with-libraries=serialization,filesystem,thread,system,atomic,date_time,timer,chrono,program_options,regex

# Build and install
sudo ./b2 install
sudo ./b2 -j$(nproc) install

# Rebuild ld cache
sudo ldconfig
@@ -43,46 +43,68 @@ if [ -z ${PYTHON_VERSION+x} ]; then
exit 127
fi

PYTHON="python${PYTHON_VERSION}"
export PYTHON="python${PYTHON_VERSION}"

if [[ $(uname) == "Darwin" ]]; then
function install_dependencies()
{
if [[ $(uname) == "Darwin" ]]; then
brew install wget
else
else
# Install a system package required by our library
sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
fi
fi

PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
export PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin

[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb

$PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
}

function build()
{
mkdir $GITHUB_WORKSPACE/build
cd $GITHUB_WORKSPACE/build

BUILD_PYBIND="ON"

cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_USE_QUATERNIONS=OFF \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
-DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=OFF \
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install

BUILD_PYBIND="ON"
# Set to 2 cores so that Actions does not error out during resource provisioning.
make -j2 install

sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
cd $GITHUB_WORKSPACE/build/python
$PYTHON -m pip install --user .
}

mkdir $GITHUB_WORKSPACE/build
cd $GITHUB_WORKSPACE/build
function test()
{
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover -v
}

cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_USE_QUATERNIONS=OFF \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
-DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install

# Set to 2 cores so that Actions does not error out during resource provisioning.
make -j2 install

cd $GITHUB_WORKSPACE/build/python
$PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover

# select between build or test
case $1 in
-d)
install_dependencies
;;
-b)
build
;;
-t)
test
;;
esac
@@ -64,11 +64,14 @@ function configure()
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=${GTSAM_ALLOW_DEPRECATED_SINCE_V42:-OFF} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \
-DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \
-DGTSAM_USE_SYSTEM_EIGEN=${GTSAM_USE_SYSTEM_EIGEN:-OFF} \
-DGTSAM_USE_SYSTEM_METIS=${GTSAM_USE_SYSTEM_METIS:-OFF} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_SINGLE_TEST_EXE=ON \
-DBOOST_ROOT=$BOOST_ROOT \
-DBoost_NO_SYSTEM_PATHS=ON \
-DBoost_ARCHITECTURE=-x64

@@ -92,7 +95,15 @@ function build ()

configure

make -j2
if [ "$(uname)" == "Linux" ]; then
if (($(nproc) > 2)); then
make -j$(nproc)
else
make -j2
fi
elif [ "$(uname)" == "Darwin" ]; then
make -j$(sysctl -n hw.physicalcpu)
fi

finish
}

@@ -105,8 +116,16 @@ function test ()

configure

# Actual build:
make -j2 check
# Actual testing
if [ "$(uname)" == "Linux" ]; then
if (($(nproc) > 2)); then
make -j$(nproc) check
else
make -j2 check
fi
elif [ "$(uname)" == "Darwin" ]; then
make -j$(sysctl -n hw.physicalcpu) check
fi

finish
}
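The two hunks above replace the fixed `make -j2` with a per-platform core count. A minimal sketch of that selection logic, collected into a single helper (the `num_jobs` name is illustrative and is not a function in these scripts):

```sh
#!/bin/bash
# Mirror the CI logic: all cores on Linux when more than 2 are available,
# physical cores on macOS, and a conservative 2 jobs otherwise.
num_jobs() {
  if [ "$(uname)" = "Linux" ]; then
    if (($(nproc) > 2)); then nproc; else echo 2; fi
  elif [ "$(uname)" = "Darwin" ]; then
    sysctl -n hw.physicalcpu
  else
    echo 2
  fi
}

make -j"$(num_jobs)" check
```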
@@ -15,31 +15,31 @@ jobs:
BOOST_VERSION: 1.67.0

strategy:
fail-fast: false
fail-fast: true
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
ubuntu-20.04-gcc-7,
ubuntu-20.04-gcc-9,
ubuntu-20.04-clang-9,
]

build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-7
os: ubuntu-20.04
compiler: gcc
version: "5"
version: "7"

- name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-9
os: ubuntu-20.04
compiler: gcc
version: "9"

- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
- name: ubuntu-20.04-clang-9
os: ubuntu-20.04
compiler: clang
version: "9"

@@ -47,8 +47,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v2

- name: Install (Linux)
if: runner.os == 'Linux'
- name: Install Dependencies
run: |
# LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then

@@ -57,13 +56,13 @@ jobs:
# 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
# This key is not in the keystore by default for Ubuntu so we need to add it.
LLVM_KEY=15CF4D18AF4F7421
gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY
gpg --keyserver keyserver.ubuntu.com --recv-key $LLVM_KEY || gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-key $LLVM_KEY
gpg -a --export $LLVM_KEY | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update

sudo apt-get install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
sudo apt-get -y update
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev

if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib

@@ -79,7 +78,5 @@ jobs:
run: |
bash .github/scripts/boost.sh

- name: Build and Test (Linux)
if: runner.os == 'Linux'
run: |
bash .github/scripts/unix.sh -t
- name: Build and Test
run: bash .github/scripts/unix.sh -t
@@ -19,23 +19,22 @@ jobs:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
macOS-10.15-xcode-11.3.1,
macos-11-xcode-13.4.1,
]

build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: macOS-10.15-xcode-11.3.1
os: macOS-10.15
- name: macos-11-xcode-13.4.1
os: macos-11
compiler: xcode
version: "11.3.1"
version: "13.4.1"

steps:
- name: Checkout
uses: actions/checkout@v2

- name: Install (macOS)
if: runner.os == 'macOS'
- name: Install Dependencies
run: |
brew install cmake ninja
brew install boost

@@ -44,11 +43,9 @@ jobs:
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
sudo xcode-select -switch /Applications/Xcode.app
echo "CC=clang" >> $GITHUB_ENV
echo "CXX=clang++" >> $GITHUB_ENV
fi
- name: Build and Test (macOS)
if: runner.os == 'macOS'
run: |
bash .github/scripts/unix.sh -t
- name: Build and Test
run: bash .github/scripts/unix.sh -t
@@ -19,51 +19,49 @@ jobs:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
# ubuntu-18.04-gcc-5, #TODO Enable once the Python wrapper is optimized for memory
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
macOS-10.15-xcode-11.3.1,
# ubuntu-18.04-gcc-5-tbb,
ubuntu-20.04-gcc-7,
ubuntu-20.04-gcc-9,
ubuntu-20.04-clang-9,
macOS-11-xcode-13.4.1,
ubuntu-20.04-gcc-7-tbb,
]

#TODO update wrapper to prevent OOM
# build_type: [Debug, Release]
build_type: [Release]
build_type: [Debug, Release]
python_version: [3]
include:
# - name: ubuntu-18.04-gcc-5
# os: ubuntu-18.04
# compiler: gcc
# version: "5"
- name: ubuntu-20.04-gcc-7
os: ubuntu-20.04
compiler: gcc
version: "7"

- name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
- name: ubuntu-20.04-gcc-9
os: ubuntu-20.04
compiler: gcc
version: "9"

- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
- name: ubuntu-20.04-clang-9
os: ubuntu-20.04
compiler: clang
version: "9"

#NOTE temporarily added this as it is a required check.
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
# NOTE temporarily added this as it is a required check.
- name: ubuntu-20.04-clang-9
os: ubuntu-20.04
compiler: clang
version: "9"
build_type: Debug
python_version: "3"

- name: macOS-10.15-xcode-11.3.1
os: macOS-10.15
- name: macOS-11-xcode-13.4.1
os: macOS-11
compiler: xcode
version: "11.3.1"
version: "13.4.1"

# - name: ubuntu-18.04-gcc-5-tbb
# os: ubuntu-18.04
# compiler: gcc
# version: "5"
# flag: tbb
- name: ubuntu-20.04-gcc-7-tbb
os: ubuntu-20.04
compiler: gcc
version: "7"
flag: tbb

steps:
- name: Checkout

@@ -77,13 +75,13 @@ jobs:
# 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
# This key is not in the keystore by default for Ubuntu so we need to add it.
LLVM_KEY=15CF4D18AF4F7421
gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY
gpg --keyserver keyserver.ubuntu.com --recv-key $LLVM_KEY || gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-key $LLVM_KEY
gpg -a --export $LLVM_KEY | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi

sudo apt-get -y update

sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev

if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib

@@ -105,7 +103,7 @@ jobs:
echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
sudo xcode-select -switch /Applications/Xcode.app
echo "CC=clang" >> $GITHUB_ENV
echo "CXX=clang++" >> $GITHUB_ENV
fi

@@ -114,11 +112,17 @@ jobs:
run: |
echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV
echo "GTSAM Uses TBB"
- name: Build (Linux)
- name: Set Swap Space
if: runner.os == 'Linux'
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 6
- name: Install Dependencies
run: |
bash .github/scripts/python.sh
- name: Build (macOS)
if: runner.os == 'macOS'
bash .github/scripts/python.sh -d
- name: Build
run: |
bash .github/scripts/python.sh
bash .github/scripts/python.sh -b
- name: Test
run: |
bash .github/scripts/python.sh -t
@@ -32,29 +32,35 @@ jobs:

include:
- name: ubuntu-gcc-deprecated
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: deprecated

- name: ubuntu-gcc-quaternions
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: quaternions

- name: ubuntu-gcc-tbb
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: tbb

- name: ubuntu-cayleymap
os: ubuntu-18.04
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: cayley

- name: ubuntu-system-libs
os: ubuntu-20.04
compiler: gcc
version: "9"
flag: system-libs

steps:
- name: Checkout
uses: actions/checkout@v2

@@ -64,13 +70,13 @@ jobs:
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421
gpg --keyserver keyserver.ubuntu.com --recv-key $LLVM_KEY || gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-key $LLVM_KEY
gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update

sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
sudo apt-get -y update
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev

if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib

@@ -104,7 +110,7 @@ jobs:
- name: Set Allow Deprecated Flag
if: matrix.flag == 'deprecated'
run: |
echo "GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON" >> $GITHUB_ENV
echo "GTSAM_ALLOW_DEPRECATED_SINCE_V42=ON" >> $GITHUB_ENV
echo "Allow deprecated since version 4.1"

- name: Set Use Quaternions Flag

@@ -126,6 +132,12 @@ jobs:
echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV
echo "GTSAM Uses Cayley map for Rot3"

- name: Use system versions of 3rd party libraries
if: matrix.flag == 'system'
run: |
echo "GTSAM_USE_SYSTEM_EIGEN=ON" >> $GITHUB_ENV
echo "GTSAM_USE_SYSTEM_METIS=ON" >> $GITHUB_ENV

- name: Build & Test
run: |
bash .github/scripts/unix.sh -t
@@ -12,42 +12,52 @@ jobs:
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
BOOST_VERSION: 1.72.0
BOOST_EXE: boost_1_72_0-msvc-14.2

strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
#TODO This build keeps timing out, need to understand why.
# windows-2016-cl,
windows-2019-cl,
]
#TODO This build fails, need to understand why.
# windows-2016-cl,
windows-2019-cl,
]

build_type: [Debug, Release]
build_type: [
Debug,
#TODO(Varun) The release build takes over 2.5 hours, need to figure out why.
# Release
]
build_unstable: [ON]
include:

#TODO This build keeps timing out, need to understand why.
#TODO This build fails, need to understand why.
# - name: windows-2016-cl
# os: windows-2016
# compiler: cl
# platform: 32

- name: windows-2019-cl
os: windows-2019
compiler: cl
platform: 64

steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install (Windows)
if: runner.os == 'Windows'
- name: Install Dependencies
shell: powershell
run: |
Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
iwr -useb get.scoop.sh -outfile 'install_scoop.ps1'
.\install_scoop.ps1 -RunAsAdmin

scoop install cmake --global # So we don't get issues with CMP0074 policy
scoop install ninja --global

if ("${{ matrix.compiler }}".StartsWith("clang")) {
scoop install llvm --global
}

if ("${{ matrix.compiler }}" -eq "gcc") {
# Chocolatey GCC is broken on the windows-2019 image.
# See: https://github.com/DaanDeMeyer/doctest/runs/231595515

@@ -55,33 +65,64 @@ jobs:
scoop install gcc --global
echo "CC=gcc" >> $GITHUB_ENV
echo "CXX=g++" >> $GITHUB_ENV

} elseif ("${{ matrix.compiler }}" -eq "clang") {
echo "CC=clang" >> $GITHUB_ENV
echo "CXX=clang++" >> $GITHUB_ENV

} else {
echo "CC=${{ matrix.compiler }}" >> $GITHUB_ENV
echo "CXX=${{ matrix.compiler }}" >> $GITHUB_ENV
echo "CC=${{ matrix.compiler }}" >> $env:GITHUB_ENV
echo "CXX=${{ matrix.compiler }}" >> $env:GITHUB_ENV

}

# Scoop modifies the PATH so we make the modified PATH global.
echo "$env:PATH" >> $GITHUB_PATH
echo "$env:PATH" >> $env:GITHUB_PATH

- name: Download and install Boost
uses: MarkusJx/install-boost@v1.0.1
id: install-boost
with:
boost_version: 1.72.0
toolset: msvc14.2
- name: Install Boost
shell: powershell
run: |
# Snippet from: https://github.com/actions/virtual-environments/issues/2667
$BOOST_PATH = "C:\hostedtoolcache\windows\Boost\$env:BOOST_VERSION\x86_64"

- name: Build (Windows)
if: runner.os == 'Windows'
env:
BOOST_ROOT: ${{ steps.install-boost.outputs.BOOST_ROOT }}
# Use the prebuilt binary for Windows
$Url = "https://sourceforge.net/projects/boost/files/boost-binaries/$env:BOOST_VERSION/$env:BOOST_EXE-${{matrix.platform}}.exe"
(New-Object System.Net.WebClient).DownloadFile($Url, "$env:TEMP\boost.exe")
Start-Process -Wait -FilePath "$env:TEMP\boost.exe" "/SILENT","/SP-","/SUPPRESSMSGBOXES","/DIR=$BOOST_PATH"

# Set the BOOST_ROOT variable
echo "BOOST_ROOT=$BOOST_PATH" >> $env:GITHUB_ENV

- name: Checkout
uses: actions/checkout@v2

- name: Configuration
run: |
cmake -E remove_directory build
cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib"
cmake --build build --config ${{ matrix.build_type }} --target gtsam
cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build --config ${{ matrix.build_type }} --target wrap
cmake --build build --config ${{ matrix.build_type }} --target check.base
cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable
cmake --build build --config ${{ matrix.build_type }} --target check.linear

- name: Build
run: |
# Since Visual Studio is a multi-generator, we need to use --config
# https://stackoverflow.com/a/24470998/1236990
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build -j 4 --config ${{ matrix.build_type }} --target wrap

# Run GTSAM tests
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.basis
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.discrete
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.geometry
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.inference
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.linear
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.navigation
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.nonlinear
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.sam
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.sfm
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.slam
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.symbolic

# Run GTSAM_UNSTABLE tests
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable
@@ -0,0 +1,21 @@
# This triggers building of packages
name: Trigger Package Builds
on:
push:
branches:
- develop
jobs:
trigger-package-build:
runs-on: ubuntu-latest
steps:
- name: Trigger Package Rebuild
uses: actions/github-script@v6
with:
github-token: ${{ secrets.PACKAGING_REPO_ACCESS_TOKEN }}
script: |
await github.rest.actions.createWorkflowDispatch({
owner: 'borglab-launchpad',
repo: 'gtsam-packaging',
workflow_id: 'main.yaml',
ref: 'master'
})
@@ -3,6 +3,7 @@
.idea
*.pyc
*.DS_Store
*.swp
/examples/Data/dubrovnik-3-7-pre-rewritten.txt
/examples/Data/pose2example-rewritten.txt
/examples/Data/pose3example-rewritten.txt

@@ -16,3 +17,5 @@
# for QtCreator:
CMakeLists.txt.user*
xcode/
/Dockerfile
/python/gtsam/notebooks/.ipynb_checkpoints/ellipses-checkpoint.ipynb
@@ -1,4 +1,3 @@
project(GTSAM CXX C)
cmake_minimum_required(VERSION 3.0)

# new feature to Cmake Version > 2.8.12

@@ -9,12 +8,23 @@ endif()

# Set the version number for the library
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 1)
set (GTSAM_VERSION_MINOR 2)
set (GTSAM_VERSION_PATCH 0)
set (GTSAM_PRERELEASE_VERSION "a8")
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")

set (CMAKE_PROJECT_VERSION ${GTSAM_VERSION_STRING})
if (${GTSAM_VERSION_PATCH} EQUAL 0)
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}${GTSAM_PRERELEASE_VERSION}")
else()
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}${GTSAM_PRERELEASE_VERSION}")
endif()

project(GTSAM
LANGUAGES CXX C
VERSION "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")

message(STATUS "GTSAM Version: ${GTSAM_VERSION_STRING}")

set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})

@@ -38,11 +48,14 @@ if(${GTSAM_SOURCE_DIR} STREQUAL ${GTSAM_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
endif()

include(cmake/HandleGeneralOptions.cmake) # CMake build options

# Libraries:
include(cmake/HandleBoost.cmake) # Boost
include(cmake/HandleCCache.cmake) # ccache
include(cmake/HandleCPack.cmake) # CPack
include(cmake/HandleEigen.cmake) # Eigen3
include(cmake/HandleGeneralOptions.cmake) # CMake build options
include(cmake/HandleMetis.cmake) # metis
include(cmake/HandleMKL.cmake) # MKL
include(cmake/HandleOpenMP.cmake) # OpenMP
include(cmake/HandlePerfTools.cmake) # Google perftools

@@ -84,6 +97,11 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
CACHE STRING "The Python version to use for wrapping")
# Set the include directory for matlab.h
set(GTWRAP_INCLUDE_NAME "wrap")

# Copy matlab.h to the correct folder.
configure_file(${PROJECT_SOURCE_DIR}/wrap/matlab.h
${PROJECT_BINARY_DIR}/wrap/matlab.h COPYONLY)

add_subdirectory(wrap)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake")
endif()

@@ -102,6 +120,11 @@ endif()
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)

if (GTSAM_BUILD_UNSTABLE)
GtsamMakeConfigFile(GTSAM_UNSTABLE "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_UNSTABLE_EXPORTED_TARGETS} FILE GTSAM_UNSTABLE-exports.cmake)
endif()

# Check for doxygen availability - optional dependency
find_package(Doxygen)
@@ -15,5 +15,7 @@ For example:
```cpp
class GTSAM_EXPORT MyClass { ... };

GTSAM_EXPORT myFunction();
GTSAM_EXPORT return_type myFunction();
```

More details [here](Using-GTSAM-EXPORT.md).
INSTALL.md
@@ -13,16 +13,18 @@ $ make install
## Important Installation Notes

1. GTSAM requires the following libraries to be installed on your system:
- BOOST version 1.58 or greater (install through Linux repositories or MacPorts)
- BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes) for version recommendations based on your compiler.

- Cmake version 3.0 or higher
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher

Optional dependent libraries:
- If TBB is installed and detectable by CMake GTSAM will use it automatically.
Ensure that CMake prints "Use Intel TBB : Yes". To disable the use of TBB,
disable the CMake flag GTSAM_WITH_TBB (enabled by default). On Ubuntu, TBB
may be installed from the Ubuntu repositories, and for other platforms it
may be downloaded from https://www.threadingbuildingblocks.org/
disable the CMake flag `GTSAM_WITH_TBB` (enabled by default) by providing
the argument `-DGTSAM_WITH_TBB=OFF` to `cmake`. On Ubuntu, TBB may be
installed from the Ubuntu repositories, and for other platforms it may be
downloaded from https://www.threadingbuildingblocks.org/
- GTSAM may be configured to use MKL by toggling `GTSAM_WITH_EIGEN_MKL` and
`GTSAM_WITH_EIGEN_MKL_OPENMP` to `ON`; however, best performance is usually
achieved with MKL disabled. We therefore advise you to benchmark your problem

@@ -48,7 +50,7 @@ will run up to 10x faster in Release mode! See the end of this document for
additional debugging tips.

3. GTSAM has Doxygen documentation. To generate, run 'make doc' from your
build directory.
build directory after setting the `GTSAM_BUILD_DOCS` and `GTSAM_BUILD_[HTML|LATEX]` cmake flags.

4. The instructions below install the library to the default system install path and
build all components. From a terminal, starting in the root library folder,

@@ -65,11 +67,15 @@ execute commands as follows for an out-of-source build:
This will build the library and unit tests, run all of the unit tests,
and then install the library itself.

## Boost Notes

Versions of Boost prior to 1.65 have a known bug that prevents proper "deep" serialization of objects, which means that objects encapsulated inside other objects don't get serialized.
This is particularly seen when using `clang` as the C++ compiler.

For this reason we recommend Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.

## Known Issues

- When using `GTSAM_BUILD_WITH_MARCH_NATIVE=ON`, you may encounter issues in running tests which we are still investigating:
- Use of a version of GCC < 7.5 results in an "Indeterminant Linear System" error for `testSmartProjectionFactor`.
- Use of Boost version < 1.67 with clang will give a segfault for mulitple test cases.
- MSVC 2013 is not yet supported because it cannot build the serialization module of Boost 1.55 (or earlier).

# Windows Installation
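Putting the notes above together, a minimal out-of-source configure/build sketch with TBB explicitly disabled (directory names assumed; run from the root of the GTSAM source tree):

```sh
#!/bin/bash
mkdir build && cd build
cmake .. -DGTSAM_WITH_TBB=OFF   # TBB is otherwise enabled by default when CMake can find it
make check                      # optional: builds and runs the unit tests
make install
```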
README.md
@@ -31,11 +31,11 @@ In the root library folder execute:

```sh
#!bash
$ mkdir build
$ cd build
$ cmake ..
$ make check (optional, runs unit tests)
$ make install
mkdir build
cd build
cmake ..
make check (optional, runs unit tests)
make install
```

Prerequisites:

@@ -68,7 +68,7 @@ We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md)

If you are using GTSAM for academic work, please use the following citation:

```
```bibtex
@software{gtsam,
author = {Frank Dellaert and Richard Roberts and Varun Agrawal and Alex Cunningham and Chris Beall and Duy-Nguyen Ta and Fan Jiang and lucacarlone and nikai and Jose Luis Blanco-Claraco and Stephen Williams and ydjian and John Lambert and Andy Melim and Zhaoyang Lv and Akshay Krishnan and Jing Dong and Gerry Chen and Krunal Chande and balderdash-devil and DiffDecisionTrees and Sungtae An and mpaluri and Ellon Paiva Mendes and Mike Bosse and Akash Patel and Ayush Baid and Paul Furgale and matthewbroadwaynavenio and roderick-koehle},
title = {borglab/gtsam},
@@ -8,9 +8,10 @@ To create a DLL in windows, the `GTSAM_EXPORT` keyword has been created and need
* At least one of the functions inside that class is declared in a .cpp file and not just the .h file.
* You can `GTSAM_EXPORT` any class it inherits from as well. (Note that this implictly requires the class does not derive from a "header-only" class. Note that Eigen is a "header-only" library, so if your class derives from Eigen, _do not_ use `GTSAM_EXPORT` in the class definition!)
3. If you have defined a class using `GTSAM_EXPORT`, do not use `GTSAM_EXPORT` in any of its individual function declarations. (Note that you _can_ put `GTSAM_EXPORT` in the definition of individual functions within a class as long as you don't put `GTSAM_EXPORT` in the class definition.)
4. For template specializations, you need to add `GTSAM_EXPORT` to each individual specialization.

## When is GTSAM_EXPORT being used incorrectly
Unfortunately, using `GTSAM_EXPORT` incorrectly often does not cause a compiler or linker error in the library that is being compiled, but only when you try to use that DLL in a different library. For example, an error in gtsam/base will often show up when compiling the check_base_program or the MATLAB wrapper, but not when compiling/linking gtsam itself. The most common errors will say something like:
Unfortunately, using `GTSAM_EXPORT` incorrectly often does not cause a compiler or linker error in the library that is being compiled, but only when you try to use that DLL in a different library. For example, an error in `gtsam/base` will often show up when compiling the `check_base_program` or the MATLAB wrapper, but not when compiling/linking gtsam itself. The most common errors will say something like:

```
Error LNK2019 unresolved external symbol "public: void __cdecl gtsam::SO3::print(class std::basic_string<char,struct std::char_traits<char>,class std::allocator<char> > const &)const " (?print@SO3@gtsam@@QEBAXAEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@@Z) referenced in function "public: static void __cdecl gtsam::Testable<class gtsam::SO3>::Print(class gtsam::SO3 const &,class std::basic_string<char,struct std::char_traits<char>,class std::allocator<char> > const &)" (?Print@?$Testable@VSO3@gtsam@@@gtsam@@SAXAEBVSO3@2@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@@Z) check_geometry_program C:\AFIT\lib\gtsam\build\gtsam\geometry\tests\testSO3.obj

@@ -29,7 +30,7 @@ Rule #1 doesn't seem very bad, until you combine it with rule #2

***Compiler Rule #2*** Anything declared in a header file is not included in a DLL.

When these two rules are combined, you get some very confusing results. For example, a class which is completely defined in a header (e.g. LieMatrix) cannot use `GTSAM_EXPORT` in its definition. If LieMatrix is defined with `GTSAM_EXPORT`, then the compiler _must_ find LieMatrix in a DLL. Because LieMatrix is a header-only class, however, it can't find it, leading to a very confusing "I can't find this symbol" type of error. Note that the linker says it can't find the symbol even though the compiler found the header file that completely defines the class.
When these two rules are combined, you get some very confusing results. For example, a class which is completely defined in a header (e.g. Foo) cannot use `GTSAM_EXPORT` in its definition. If Foo is defined with `GTSAM_EXPORT`, then the compiler _must_ find Foo in a DLL. Because Foo is a header-only class, however, it can't find it, leading to a very confusing "I can't find this symbol" type of error. Note that the linker says it can't find the symbol even though the compiler found the header file that completely defines the class.

Also note that when a class that you want to export inherits from another class that is not exportable, this can cause significant issues. According to this [MSVC Warning page](https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-2-c4275?view=vs-2019), it may not strictly be a rule, but we have seen several linker errors when a class that is defined with `GTSAM_EXPORT` extended an Eigen class. In general, it appears that any inheritance of non-exportable class by an exportable class is a bad idea.
@@ -21,6 +21,10 @@ else()
find_dependency(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@)
endif()

if(@GTSAM_USE_SYSTEM_EIGEN@)
find_dependency(Eigen3 REQUIRED)
endif()

# Load exports
include(${OUR_CMAKE_DIR}/@PACKAGE_NAME@-exports.cmake)
@@ -1,81 +0,0 @@
# - Try to find Eigen3 lib
#
# This module supports requiring a minimum version, e.g. you can do
# find_package(Eigen3 3.1.2)
# to require version 3.1.2 or newer of Eigen3.
#
# Once done this will define
#
# EIGEN3_FOUND - system has eigen lib with correct version
# EIGEN3_INCLUDE_DIR - the eigen include directory
# EIGEN3_VERSION - eigen version

# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>
# Copyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>
# Copyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
# Redistribution and use is allowed according to the terms of the 2-clause BSD license.

if(NOT Eigen3_FIND_VERSION)
if(NOT Eigen3_FIND_VERSION_MAJOR)
set(Eigen3_FIND_VERSION_MAJOR 2)
endif(NOT Eigen3_FIND_VERSION_MAJOR)
if(NOT Eigen3_FIND_VERSION_MINOR)
set(Eigen3_FIND_VERSION_MINOR 91)
endif(NOT Eigen3_FIND_VERSION_MINOR)
if(NOT Eigen3_FIND_VERSION_PATCH)
set(Eigen3_FIND_VERSION_PATCH 0)
endif(NOT Eigen3_FIND_VERSION_PATCH)

set(Eigen3_FIND_VERSION "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}")
endif(NOT Eigen3_FIND_VERSION)

macro(_eigen3_check_version)
file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header)

string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match "${_eigen3_version_header}")
set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}")
string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match "${_eigen3_version_header}")
set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}")
string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match "${_eigen3_version_header}")
set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}")

set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION})
if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
set(EIGEN3_VERSION_OK FALSE)
else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
set(EIGEN3_VERSION_OK TRUE)
endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})

if(NOT EIGEN3_VERSION_OK)

message(STATUS "Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, "
"but at least version ${Eigen3_FIND_VERSION} is required")
endif(NOT EIGEN3_VERSION_OK)
endmacro(_eigen3_check_version)

if (EIGEN3_INCLUDE_DIR)

# in cache already
_eigen3_check_version()
set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})

else (EIGEN3_INCLUDE_DIR)

find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library
PATHS
${CMAKE_INSTALL_PREFIX}/include
${KDE4_INCLUDE_DIR}
PATH_SUFFIXES eigen3 eigen
)

if(EIGEN3_INCLUDE_DIR)
_eigen3_check_version()
endif(EIGEN3_INCLUDE_DIR)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK)

mark_as_advanced(EIGEN3_INCLUDE_DIR)

endif(EIGEN3_INCLUDE_DIR)
@@ -144,7 +144,8 @@ if(NOT TBB_FOUND)

elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
# OS X
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb")
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb"
"/usr/local/opt/tbb")

# TODO: Check to see which C++ library is being used by the compiler.
if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0)

@@ -181,7 +182,18 @@ if(NOT TBB_FOUND)
##################################

if(TBB_INCLUDE_DIRS)
file(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _tbb_version_file)
set(_tbb_version_file_prior_to_tbb_2021_1 "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h")
set(_tbb_version_file_after_tbb_2021_1 "${TBB_INCLUDE_DIRS}/oneapi/tbb/version.h")

if (EXISTS "${_tbb_version_file_prior_to_tbb_2021_1}")
file(READ "${_tbb_version_file_prior_to_tbb_2021_1}" _tbb_version_file )
elseif (EXISTS "${_tbb_version_file_after_tbb_2021_1}")
file(READ "${_tbb_version_file_after_tbb_2021_1}" _tbb_version_file )
else()
message(FATAL_ERROR "Found TBB installation: ${TBB_INCLUDE_DIRS} "
"missing version header.")
endif()

string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
TBB_VERSION_MAJOR "${_tbb_version_file}")
string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1"
@@ -1,6 +1,6 @@
include(CheckCXXCompilerFlag) # for check_cxx_compiler_flag()

# Set cmake policy to recognize the AppleClang compiler
# Set cmake policy to recognize the Apple Clang compiler
# independently from the Clang compiler.
if(POLICY CMP0025)
cmake_policy(SET CMP0025 NEW)

@@ -87,12 +87,16 @@ if(MSVC)
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE
WINDOWS_LEAN_AND_MEAN
NOMINMAX
)
)
# Avoid literally hundreds to thousands of warnings:
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC
/wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data
/wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data
)

add_compile_options(/wd4005)
add_compile_options(/wd4101)
add_compile_options(/wd4834)

endif()

# Other (non-preprocessor macros) compiler flags:

@@ -179,19 +183,43 @@ set(CMAKE_EXE_LINKER_FLAGS_PROFILING ${GTSAM_CMAKE_EXE_LINKER_FLAGS_PROFILING})

# Clang uses a template depth that is less than standard and is too small
if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
# Apple Clang before 5.0 does not support -ftemplate-depth.
if(NOT (APPLE AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "5.0"))
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-ftemplate-depth=1024")
endif()
# Apple Clang before 5.0 does not support -ftemplate-depth.
if(NOT (APPLE AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "5.0"))
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-ftemplate-depth=1024")
endif()
endif()

if (NOT MSVC)
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" ON)
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF)
if(GTSAM_BUILD_WITH_MARCH_NATIVE)
# Add as public flag so all dependant projects also use it, as required
# by Eigen to avid crashes due to SIMD vectorization:
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
endif()
# Check if Apple OS and compiler is [Apple]Clang
if(APPLE AND (${CMAKE_CXX_COMPILER_ID} MATCHES "^(Apple)?Clang$"))
# Check Clang version since march=native is only supported for version 15.0+.
if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "15.0")
if(NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
# Add as public flag so all dependent projects also use it, as required
# by Eigen to avoid crashes due to SIMD vectorization:
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
else()
message(WARNING "Option GTSAM_BUILD_WITH_MARCH_NATIVE ignored, because native architecture is not supported for Apple silicon and AppleClang version < 15.0.")
endif() # CMAKE_SYSTEM_PROCESSOR
else()
# Add as public flag so all dependent projects also use it, as required
# by Eigen to avoid crashes due to SIMD vectorization:
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
endif() # CMAKE_CXX_COMPILER_VERSION
else()
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-march=native" COMPILER_SUPPORTS_MARCH_NATIVE)
if(COMPILER_SUPPORTS_MARCH_NATIVE)
# Add as public flag so all dependent projects also use it, as required
# by Eigen to avoid crashes due to SIMD vectorization:
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
else()
message(WARNING "Option GTSAM_BUILD_WITH_MARCH_NATIVE ignored, because native architecture is not supported.")
endif() # COMPILER_SUPPORTS_MARCH_NATIVE
endif() # APPLE
endif() # GTSAM_BUILD_WITH_MARCH_NATIVE
endif()

# Set up build type library postfixes
@@ -27,6 +27,8 @@ function(GtsamMakeConfigFile PACKAGE_NAME)
# here.
if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING})
elseif(NOT DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${GTSAM_VERSION_STRING})
endif()

# Version file
@@ -22,7 +22,7 @@ endif()

# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
set(BOOST_FIND_MINIMUM_VERSION 1.58)
set(BOOST_FIND_MINIMUM_VERSION 1.65)
set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)

find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})

@@ -30,7 +30,7 @@ find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM
# Required components
if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.")
message(FATAL_ERROR "Missing required Boost components >= v1.65, please install/upgrade Boost or configure your search paths.")
endif()

option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)

@@ -25,4 +25,4 @@ set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION

# Deb-package specific cpack
set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev")
set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.65)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
@@ -1,6 +1,5 @@
###############################################################################
# Option for using system Eigen or GTSAM-bundled Eigen

option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF)

if(NOT GTSAM_USE_SYSTEM_EIGEN)

@@ -11,10 +10,14 @@ endif()

# Switch for using system Eigen or GTSAM-bundled Eigen
if(GTSAM_USE_SYSTEM_EIGEN)
find_package(Eigen3 REQUIRED)
# Since Eigen 3.3.0 a Eigen3Config.cmake is available so use it.
find_package(Eigen3 CONFIG REQUIRED) # need to find again as REQUIRED

# Use generic Eigen include paths e.g. <Eigen/Core>
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
# The actual include directory (for BUILD cmake target interface):
# Note: EIGEN3_INCLUDE_DIR points to some random location on some eigen
# versions. So here I use the target itself to get the proper include
# directory (it is generated by cmake, thus has the correct path)
get_target_property(GTSAM_EIGEN_INCLUDE_FOR_BUILD Eigen3::Eigen INTERFACE_INCLUDE_DIRECTORIES)

# check if MKL is also enabled - can have one or the other, but not both!
# Note: Eigen >= v3.2.5 includes our patches

@@ -27,9 +30,6 @@ if(GTSAM_USE_SYSTEM_EIGEN)
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4))
message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
endif()

# The actual include directory (for BUILD cmake target interface):
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
else()
# Use bundled Eigen include path.
# Clear any variables set by FindEigen3

@@ -42,7 +42,20 @@ else()
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")

# The actual include directory (for BUILD cmake target interface):
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${GTSAM_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${GTSAM_SOURCE_DIR}/gtsam/3rdparty/Eigen")

add_library(gtsam_eigen3 INTERFACE)

target_include_directories(gtsam_eigen3 INTERFACE
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
)
add_library(Eigen3::Eigen ALIAS gtsam_eigen3)

install(TARGETS gtsam_eigen3 EXPORT GTSAM-exports PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})

list(APPEND GTSAM_EXPORTED_TARGETS gtsam_eigen3)
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}")
endif()

# Detect Eigen version:
@@ -14,20 +14,21 @@ if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V42 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF)
if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif()
@@ -0,0 +1,51 @@
###############################################################################
# Metis library

# For both system or bundle version, a cmake target "metis-gtsam-if" is defined (interface library)

# Dont try to use metis if GTSAM_SUPPORT_NESTED_DISSECTION is disabled:
if (NOT GTSAM_SUPPORT_NESTED_DISSECTION)
return()
endif()

option(GTSAM_USE_SYSTEM_METIS "Find and use system-installed libmetis. If 'off', use the one bundled with GTSAM" OFF)

if(GTSAM_USE_SYSTEM_METIS)
# Debian package: libmetis-dev

find_path(METIS_INCLUDE_DIR metis.h REQUIRED)
find_library(METIS_LIBRARY metis REQUIRED)

if(METIS_INCLUDE_DIR AND METIS_LIBRARY)
mark_as_advanced(METIS_INCLUDE_DIR)
mark_as_advanced(METIS_LIBRARY)

add_library(metis-gtsam-if INTERFACE)
target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR}
# gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
# via extern "C"
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
)
target_link_libraries(metis-gtsam-if INTERFACE ${METIS_LIBRARY})
endif()
else()
# Bundled version:
option(GTSAM_BUILD_METIS_EXECUTABLES "Build metis library executables" OFF)
add_subdirectory(${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis)

target_include_directories(metis-gtsam BEFORE PUBLIC
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
# gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
# via extern "C"
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
)

add_library(metis-gtsam-if INTERFACE)
target_link_libraries(metis-gtsam-if INTERFACE metis-gtsam)
endif()

list(APPEND GTSAM_EXPORTED_TARGETS metis-gtsam-if)
install(TARGETS metis-gtsam-if EXPORT GTSAM-exports ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
@ -32,6 +32,8 @@ endif()
|
|||
print_build_options_for_target(gtsam)
|
||||
|
||||
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
|
||||
print_config("Use System Metis" "${GTSAM_USE_SYSTEM_METIS}")
|
||||
print_config("Using Boost version" "${Boost_VERSION}")
|
||||
|
||||
if(GTSAM_USE_TBB)
|
||||
print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")
|
||||
|
@ -85,7 +87,7 @@ print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as defaul
|
|||
print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
|
||||
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
|
||||
print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
|
||||
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
|
||||
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V42} "Allow features deprecated in GTSAM 4.2")
|
||||
print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
|
||||
print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
|
|||
|
||||
set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR})
|
||||
set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR})
|
||||
set(Python_VERSION_PATCH ${PYTHON_VERSION_PATCH})
|
||||
set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})
|
||||
|
||||
else()
|
||||
|
@ -31,11 +32,12 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
|
|||
|
||||
set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR})
|
||||
set(Python_VERSION_MINOR ${Python3_VERSION_MINOR})
|
||||
set(Python_VERSION_PATCH ${Python3_VERSION_PATCH})
|
||||
|
||||
endif()
|
||||
|
||||
set(GTSAM_PYTHON_VERSION
|
||||
"${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}"
|
||||
"${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}.${Python_VERSION_PATCH}"
|
||||
CACHE STRING "The version of Python to build the wrappers against."
|
||||
FORCE)
|
||||
|
||||
|
|
|
@ -1,24 +1,32 @@
|
|||
###############################################################################
|
||||
# Find TBB
|
||||
find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
|
||||
if (GTSAM_WITH_TBB)
|
||||
# Find TBB
|
||||
find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
|
||||
|
||||
# Set up variables if we're using TBB
|
||||
if(TBB_FOUND AND GTSAM_WITH_TBB)
|
||||
set(GTSAM_USE_TBB 1) # This will go into config.h
|
||||
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
|
||||
set(TBB_GREATER_EQUAL_2020 1)
|
||||
# Set up variables if we're using TBB
|
||||
if(TBB_FOUND)
|
||||
set(GTSAM_USE_TBB 1) # This will go into config.h
|
||||
|
||||
# if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
|
||||
# message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
|
||||
# endif()
|
||||
|
||||
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
|
||||
set(TBB_GREATER_EQUAL_2020 1)
|
||||
else()
|
||||
set(TBB_GREATER_EQUAL_2020 0)
|
||||
endif()
|
||||
# all definitions and link requisites will go via imported targets:
|
||||
# tbb & tbbmalloc
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
|
||||
else()
|
||||
set(TBB_GREATER_EQUAL_2020 0)
|
||||
set(GTSAM_USE_TBB 0) # This will go into config.h
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
# Prohibit Timing build mode in combination with TBB
|
||||
if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
|
||||
message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
|
||||
endif()
|
||||
# all definitions and link requisites will go via imported targets:
|
||||
# tbb & tbbmalloc
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
|
||||
else()
|
||||
set(GTSAM_USE_TBB 0) # This will go into config.h
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
# Prohibit Timing build mode in combination with TBB
|
||||
if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
|
||||
message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
|
||||
endif()
|
||||
|
|
|
@ -22,18 +22,19 @@ if (GTSAM_BUILD_DOCS)
|
|||
|
||||
# GTSAM core subfolders
|
||||
set(gtsam_doc_subdirs
|
||||
gtsam/base
|
||||
gtsam/discrete
|
||||
gtsam/geometry
|
||||
gtsam/inference
|
||||
gtsam/linear
|
||||
gtsam/navigation
|
||||
gtsam/nonlinear
|
||||
gtsam/sam
|
||||
gtsam/sfm
|
||||
gtsam/slam
|
||||
gtsam/smart
|
||||
gtsam/symbolic
|
||||
gtsam/base
|
||||
gtsam/basis
|
||||
gtsam/discrete
|
||||
gtsam/geometry
|
||||
gtsam/hybrid
|
||||
gtsam/inference
|
||||
gtsam/linear
|
||||
gtsam/navigation
|
||||
gtsam/nonlinear
|
||||
gtsam/sam
|
||||
gtsam/sfm
|
||||
gtsam/slam
|
||||
gtsam/symbolic
|
||||
gtsam
|
||||
)
|
||||
|
||||
|
@ -41,10 +42,12 @@ if (GTSAM_BUILD_DOCS)
|
|||
set(gtsam_unstable_doc_subdirs
|
||||
gtsam_unstable/base
|
||||
gtsam_unstable/discrete
|
||||
gtsam_unstable/dynamics
|
||||
gtsam_unstable/geometry
|
||||
gtsam_unstable/linear
|
||||
gtsam_unstable/nonlinear
|
||||
gtsam_unstable/slam
|
||||
gtsam_unstable/dynamics
|
||||
gtsam_unstable/nonlinear
|
||||
gtsam_unstable/partition
|
||||
gtsam_unstable/slam
|
||||
gtsam_unstable
|
||||
)
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// add unary measurement factors, like GPS, on all three poses
|
||||
noiseModel::Diagonal::shared_ptr unaryNoise =
|
||||
auto unaryNoise =
|
||||
noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1)); // 10cm std on x,y
|
||||
graph.add(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise));
|
||||
graph.add(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise));
|
||||
graph.add(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise));
|
||||
graph.emplace_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise);
|
||||
graph.emplace_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise);
|
||||
graph.emplace_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise);
|
||||
|
||||
|
|
|
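For readers following along, here is a minimal compilable sketch of the emplace_shared pattern, using a stock PriorFactor<Pose2> rather than the tutorial's custom UnaryFactor; the file layout and main() are my own framing, assuming a standard Boost-based GTSAM 4.x install.

```cpp
#include <gtsam/geometry/Pose2.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/PriorFactor.h>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;
  auto noise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));

  // emplace_shared<T>(args...) constructs the factor in place inside the graph,
  // replacing the older graph.add(boost::make_shared<T>(args...)) pattern.
  graph.emplace_shared<PriorFactor<Pose2>>(1, Pose2(0.0, 0.0, 0.0), noise);

  graph.print("graph:\n");
  return 0;
}
```

The tutorial's UnaryFactor is added in exactly the same way, as in the new lines above.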
@ -1,13 +1,12 @@
|
|||
class UnaryFactor: public NoiseModelFactor1<Pose2> {
|
||||
double mx_, my_; ///< X and Y measurements
|
||||
|
||||
|
||||
public:
|
||||
UnaryFactor(Key j, double x, double y, const SharedNoiseModel& model):
|
||||
NoiseModelFactor1<Pose2>(model, j), mx_(x), my_(y) {}
|
||||
|
||||
Vector evaluateError(const Pose2& q,
|
||||
boost::optional<Matrix&> H = boost::none) const
|
||||
{
|
||||
Vector evaluateError(const Pose2& q,
|
||||
boost::optional<Matrix&> H = boost::none) const override {
|
||||
const Rot2& R = q.rotation();
|
||||
if (H) (*H) = (gtsam::Matrix(2, 3) <<
|
||||
R.c(), -R.s(), 0.0,
|
||||
|
|
|
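Filled out, the class with the override specifier added by this change looks roughly as follows; this is a sketch that mirrors the tutorial code and the Boost-era API (newer releases replace NoiseModelFactor1 with NoiseModelFactorN and boost::optional<Matrix&> with Matrix*).

```cpp
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactor.h>

using namespace gtsam;

// A 2D "GPS-like" measurement factor on a single Pose2, as in the tutorial.
class UnaryFactor : public NoiseModelFactor1<Pose2> {
  double mx_, my_;  ///< X and Y measurements

 public:
  UnaryFactor(Key j, double x, double y, const SharedNoiseModel& model)
      : NoiseModelFactor1<Pose2>(model, j), mx_(x), my_(y) {}

  // 'override' lets the compiler verify the signature matches the base class.
  Vector evaluateError(const Pose2& q,
                       boost::optional<Matrix&> H = boost::none) const override {
    const Rot2& R = q.rotation();
    // Jacobian of (x, y) with respect to the tangent-space update, discussed below.
    if (H)
      (*H) = (Matrix(2, 3) << R.c(), -R.s(), 0.0,
                              R.s(),  R.c(), 0.0).finished();
    return (Vector(2) << q.x() - mx_, q.y() - my_).finished();
  }
};
```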
@ -3,13 +3,11 @@ NonlinearFactorGraph graph;
|
|||
|
||||
// Add a Gaussian prior on pose x_1
|
||||
Pose2 priorMean(0.0, 0.0, 0.0);
|
||||
noiseModel::Diagonal::shared_ptr priorNoise =
|
||||
noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
|
||||
graph.addPrior(1, priorMean, priorNoise);
|
||||
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
|
||||
graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));
|
||||
|
||||
// Add two odometry factors
|
||||
Pose2 odometry(2.0, 0.0, 0.0);
|
||||
noiseModel::Diagonal::shared_ptr odometryNoise =
|
||||
noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
|
||||
auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
|
||||
graph.add(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise));
|
||||
graph.add(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise));
|
||||
|
|
|
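Put together, the updated listing corresponds to a small self-contained program along these lines (a sketch under the same assumptions as above; only the final print call is my addition).

```cpp
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;

  // Gaussian prior on the first pose
  Pose2 priorMean(0.0, 0.0, 0.0);
  auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
  graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));

  // Two odometry factors with a slightly different noise model
  Pose2 odometry(2.0, 0.0, 0.0);
  auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
  graph.add(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise));
  graph.add(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise));

  graph.print("Factor Graph:\n");
  return 0;
}
```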
@ -1,11 +1,14 @@
|
|||
Factor Graph:
|
||||
size: 3
|
||||
factor 0: PriorFactor on 1
|
||||
prior mean: (0, 0, 0)
|
||||
|
||||
Factor 0: PriorFactor on 1
|
||||
prior mean: (0, 0, 0)
|
||||
noise model: diagonal sigmas [0.3; 0.3; 0.1];
|
||||
factor 1: BetweenFactor(1,2)
|
||||
measured: (2, 0, 0)
|
||||
noise model: diagonal sigmas [0.2; 0.2; 0.1];
|
||||
factor 2: BetweenFactor(2,3)
|
||||
measured: (2, 0, 0)
|
||||
|
||||
Factor 1: BetweenFactor(1,2)
|
||||
measured: (2, 0, 0)
|
||||
noise model: diagonal sigmas [0.2; 0.2; 0.1];
|
||||
|
||||
Factor 2: BetweenFactor(2,3)
|
||||
measured: (2, 0, 0)
|
||||
noise model: diagonal sigmas [0.2; 0.2; 0.1];
|
|
@ -1,11 +1,23 @@
|
|||
Initial Estimate:
|
||||
|
||||
Values with 3 values:
|
||||
Value 1: (0.5, 0, 0.2)
|
||||
Value 2: (2.3, 0.1, -0.2)
|
||||
Value 3: (4.1, 0.1, 0.1)
|
||||
Value 1: (gtsam::Pose2)
|
||||
(0.5, 0, 0.2)
|
||||
|
||||
Value 2: (gtsam::Pose2)
|
||||
(2.3, 0.1, -0.2)
|
||||
|
||||
Value 3: (gtsam::Pose2)
|
||||
(4.1, 0.1, 0.1)
|
||||
|
||||
Final Result:
|
||||
|
||||
Values with 3 values:
|
||||
Value 1: (-1.8e-16, 8.7e-18, -9.1e-19)
|
||||
Value 2: (2, 7.4e-18, -2.5e-18)
|
||||
Value 3: (4, -1.8e-18, -3.1e-18)
|
||||
Value 1: (gtsam::Pose2)
|
||||
(7.5e-16, -5.3e-16, -1.8e-16)
|
||||
|
||||
Value 2: (gtsam::Pose2)
|
||||
(2, -1.1e-15, -2.5e-16)
|
||||
|
||||
Value 3: (gtsam::Pose2)
|
||||
(4, -1.7e-15, -2.5e-16)
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
x1 covariance:
|
||||
0.09 1.1e-47 5.7e-33
|
||||
1.1e-47 0.09 1.9e-17
|
||||
5.7e-33 1.9e-17 0.01
|
||||
0.09 1.7e-33 2.8e-33
|
||||
1.7e-33 0.09 2.6e-17
|
||||
2.8e-33 2.6e-17 0.01
|
||||
x2 covariance:
|
||||
0.13 4.7e-18 2.4e-18
|
||||
4.7e-18 0.17 0.02
|
||||
2.4e-18 0.02 0.02
|
||||
0.13 1.2e-18 6.1e-19
|
||||
1.2e-18 0.17 0.02
|
||||
6.1e-19 0.02 0.02
|
||||
x3 covariance:
|
||||
0.17 2.7e-17 8.4e-18
|
||||
2.7e-17 0.37 0.06
|
||||
8.4e-18 0.06 0.03
|
||||
0.17 8.6e-18 2.7e-18
|
||||
8.6e-18 0.37 0.06
|
||||
2.7e-18 0.06 0.03
|
2973 doc/Doxyfile.in (file diff suppressed because it is too large)
|
@ -18,7 +18,6 @@
|
|||
<tab type="files" visible="yes" title="" intro=""/>
|
||||
<tab type="globals" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="dirs" visible="yes" title="" intro=""/>
|
||||
<tab type="examples" visible="yes" title="" intro=""/>
|
||||
</navindex>
|
||||
|
||||
|
|
File diff suppressed because it is too large
Binary file not shown.
Binary file not shown.
289 doc/gtsam.lyx
|
@ -1,5 +1,5 @@
|
|||
#LyX 2.2 created this file. For more info see http://www.lyx.org/
|
||||
\lyxformat 508
|
||||
#LyX 2.3 created this file. For more info see http://www.lyx.org/
|
||||
\lyxformat 544
|
||||
\begin_document
|
||||
\begin_header
|
||||
\save_transient_properties true
|
||||
|
@ -62,6 +62,8 @@
|
|||
\font_osf false
|
||||
\font_sf_scale 100 100
|
||||
\font_tt_scale 100 100
|
||||
\use_microtype false
|
||||
\use_dash_ligatures true
|
||||
\graphics default
|
||||
\default_output_format default
|
||||
\output_sync 0
|
||||
|
@ -91,6 +93,7 @@
|
|||
\suppress_date false
|
||||
\justification true
|
||||
\use_refstyle 0
|
||||
\use_minted 0
|
||||
\index Index
|
||||
\shortcut idx
|
||||
\color #008000
|
||||
|
@ -105,7 +108,10 @@
|
|||
\tocdepth 3
|
||||
\paragraph_separation indent
|
||||
\paragraph_indentation default
|
||||
\quotes_language english
|
||||
\is_math_indent 0
|
||||
\math_numbering_side default
|
||||
\quotes_style english
|
||||
\dynamic_quotes 0
|
||||
\papercolumns 1
|
||||
\papersides 1
|
||||
\paperpagestyle default
|
||||
|
@ -128,14 +134,10 @@ A Hands-on Introduction
|
|||
|
||||
\begin_layout Author
|
||||
Frank Dellaert
|
||||
\begin_inset Newline newline
|
||||
\end_inset
|
||||
|
||||
Technical Report number GT-RIM-CP&R-2014-XXX
|
||||
\end_layout
|
||||
|
||||
\begin_layout Date
|
||||
September 2014
|
||||
Updated Last March 2022
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
@ -148,18 +150,14 @@ filename "common_macros.tex"
|
|||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Section*
|
||||
Overview
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
In this document I provide a hands-on introduction to both factor graphs
|
||||
and GTSAM.
|
||||
This is an updated version from the 2012 TR that is tailored to our GTSAM
|
||||
3.0 library and beyond.
|
||||
4.0 library and beyond.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
|
||||
\series bold
|
||||
Factor graphs
|
||||
|
@ -168,6 +166,7 @@ Factor graphs
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Koller09book"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -199,7 +198,7 @@ ts or prior knowledge.
|
|||
robotics and vision.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
The GTSAM toolbox (GTSAM stands for
|
||||
\begin_inset Quotes eld
|
||||
\end_inset
|
||||
|
@ -214,11 +213,13 @@ Georgia Tech Smoothing and Mapping
|
|||
It provides state of the art solutions to the SLAM and SFM problems, but
|
||||
can also be used to model and solve both simpler and more complex estimation
|
||||
problems.
|
||||
It also provides a MATLAB interface which allows for rapid prototype developmen
|
||||
t, visualization, and user interaction.
|
||||
It also provides MATLAB and Python wrappers which allow for rapid prototype
|
||||
development, visualization, and user interaction.
|
||||
In addition, it is easy to use in Jupyter notebooks and/or Google's coLaborator
|
||||
y.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
GTSAM exploits sparsity to be computationally efficient.
|
||||
Typically measurements only provide information on the relationship between
|
||||
a handful of variables, and hence the resulting factor graph will be sparsely
|
||||
|
@ -229,14 +230,17 @@ l complexity.
|
|||
GTSAM provides iterative methods that are quite efficient regardless.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
You can download the latest version of GTSAM at
|
||||
\begin_layout Abstract
|
||||
You can download the latest version of GTSAM from GitHub at
|
||||
\end_layout
|
||||
|
||||
\begin_layout Abstract
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
http://tinyurl.com/gtsam
|
||||
https://github.com/borglab/gtsam
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
@ -270,6 +274,7 @@ Let us start with a one-page primer on factor graphs, which in no way replaces
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Kschischang01it"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -277,6 +282,7 @@ key "Kschischang01it"
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Loeliger04spm"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -732,7 +738,7 @@ noindent
|
|||
\begin_inset Formula $f_{0}(x_{1})$
|
||||
\end_inset
|
||||
|
||||
on lines 5-8 as an instance of
|
||||
on lines 5-7 as an instance of
|
||||
\series bold
|
||||
\emph on
|
||||
PriorFactor<T>
|
||||
|
@ -764,7 +770,7 @@ Pose2,
|
|||
noiseModel::Diagonal
|
||||
\series default
|
||||
\emph default
|
||||
by specifying three standard deviations in line 7, respectively 30 cm.
|
||||
by specifying three standard deviations in line 6, respectively 30 cm.
|
||||
\begin_inset space ~
|
||||
\end_inset
|
||||
|
||||
|
@ -786,7 +792,7 @@ Similarly, odometry measurements are specified as
|
|||
Pose2
|
||||
\series default
|
||||
\emph default
|
||||
on line 11, with a slightly different noise model defined on line 12-13.
|
||||
on line 10, with a slightly different noise model defined on line 11.
|
||||
We then add the two factors
|
||||
\begin_inset Formula $f_{1}(x_{1},x_{2};o_{1})$
|
||||
\end_inset
|
||||
|
@ -795,7 +801,7 @@ Pose2
|
|||
\begin_inset Formula $f_{2}(x_{2},x_{3};o_{2})$
|
||||
\end_inset
|
||||
|
||||
on lines 14-15, as instances of yet another templated class,
|
||||
on lines 12-13, as instances of yet another templated class,
|
||||
\series bold
|
||||
\emph on
|
||||
BetweenFactor<T>
|
||||
|
@ -866,7 +872,7 @@ smoothing and mapping
|
|||
|
||||
.
|
||||
Later in this document we will talk about how we can also use GTSAM to
|
||||
do filtering (which you often do
|
||||
do filtering (which often you do
|
||||
\emph on
|
||||
not
|
||||
\emph default
|
||||
|
@ -919,7 +925,11 @@ Values
|
|||
\begin_layout Standard
|
||||
The latter point is often a point of confusion with beginning users of GTSAM.
|
||||
It helps to remember that when designing GTSAM we took a functional approach
|
||||
of classes corresponding to mathematical objects, which are usually immutable.
|
||||
of classes corresponding to mathematical objects, which are usually
|
||||
\emph on
|
||||
immutable
|
||||
\emph default
|
||||
.
|
||||
You should think of a factor graph as a
|
||||
\emph on
|
||||
function
|
||||
|
@ -1018,7 +1028,7 @@ NonlinearFactorGraph
|
|||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
The relevant output from running the example is as follows:
|
||||
The relevant output from running the example is as follows:
|
||||
\family typewriter
|
||||
\size small
|
||||
|
||||
|
@ -1321,6 +1331,7 @@ r in a pre-existing map, or indeed the presence of absence of ceiling lights
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Dellaert99b"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -1353,14 +1364,18 @@ where
|
|||
\end_inset
|
||||
|
||||
is the measurement,
|
||||
\begin_inset Formula $q$
|
||||
\begin_inset Formula $q\in SE(2)$
|
||||
\end_inset
|
||||
|
||||
is the unknown variable,
|
||||
\begin_inset Formula $h(q)$
|
||||
\end_inset
|
||||
|
||||
is a (possibly nonlinear) measurement function, and
|
||||
is a
|
||||
\series bold
|
||||
measurement function
|
||||
\series default
|
||||
, and
|
||||
\begin_inset Formula $\Sigma$
|
||||
\end_inset
|
||||
|
||||
|
@ -1536,12 +1551,13 @@ E(q)\define h(q)-m
|
|||
|
||||
\end_inset
|
||||
|
||||
which is done on line 12.
|
||||
which is done on line 14.
|
||||
Importantly, because we want to use this factor for nonlinear optimization
|
||||
(see e.g.,
|
||||
\begin_inset CommandInset citation
|
||||
LatexCommand citealt
|
||||
key "Dellaert06ijrr"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -1588,11 +1604,11 @@ q_{y}
|
|||
\begin_inset Formula $q=\left(q_{x},q_{y},q_{\theta}\right)$
|
||||
\end_inset
|
||||
|
||||
, yields the following simple
|
||||
, yields the following
|
||||
\begin_inset Formula $2\times3$
|
||||
\end_inset
|
||||
|
||||
matrix in tangent space which is the same the as the rotation matrix:
|
||||
matrix:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
@ -1607,6 +1623,171 @@ H=\left[\begin{array}{ccc}
|
|||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Paragraph*
|
||||
Important Note
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Many of our users, when attempting to create a custom factor, are initially
|
||||
surprised at the Jacobian matrix not agreeing with their intuition.
|
||||
For example, above you might simply expect a
|
||||
\begin_inset Formula $2\times3$
|
||||
\end_inset
|
||||
|
||||
identity matrix.
|
||||
This
|
||||
\emph on
|
||||
would
|
||||
\emph default
|
||||
be true for variables belonging to a vector space.
|
||||
However, in GTSAM we define the Jacobian more generally to be the matrix
|
||||
|
||||
\begin_inset Formula $H$
|
||||
\end_inset
|
||||
|
||||
such that
|
||||
\begin_inset Formula
|
||||
\[
|
||||
h(q\exp\hat{\xi})\approx h(q)+H\xi
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
where
|
||||
\begin_inset Formula $\xi=(\delta x,\delta y,\delta\theta)$
|
||||
\end_inset
|
||||
|
||||
is an incremental update and
|
||||
\begin_inset Formula $\exp\hat{\xi}$
|
||||
\end_inset
|
||||
|
||||
is the
|
||||
\series bold
|
||||
exponential map
|
||||
\series default
|
||||
for the variable we want to update.
|
||||
In this case
|
||||
\begin_inset Formula $q\in SE(2)$
|
||||
\end_inset
|
||||
|
||||
, where
|
||||
\begin_inset Formula $SE(2)$
|
||||
\end_inset
|
||||
|
||||
is the group of 2D rigid transforms, implemented by
|
||||
\series bold
|
||||
\emph on
|
||||
Pose2
|
||||
\emph default
|
||||
.
|
||||
|
||||
\series default
|
||||
The exponential map for
|
||||
\begin_inset Formula $SE(2)$
|
||||
\end_inset
|
||||
|
||||
can be approximated to first order as
|
||||
\begin_inset Formula
|
||||
\[
|
||||
\exp\hat{\xi}\approx\left[\begin{array}{ccc}
|
||||
1 & -\delta\theta & \delta x\\
|
||||
\delta\theta & 1 & \delta y\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
when using the
|
||||
\begin_inset Formula $3\times3$
|
||||
\end_inset
|
||||
|
||||
matrix representation for 2D poses, and hence
|
||||
\begin_inset Formula
|
||||
\[
|
||||
h(qe^{\hat{\xi}})\approx h\left(\left[\begin{array}{ccc}
|
||||
\cos(q_{\theta}) & -\sin(q_{\theta}) & q_{x}\\
|
||||
\sin(q_{\theta}) & \cos(q_{\theta}) & q_{y}\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]\left[\begin{array}{ccc}
|
||||
1 & -\delta\theta & \delta x\\
|
||||
\delta\theta & 1 & \delta y\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]\right)=\left[\begin{array}{c}
|
||||
q_{x}+\cos(q_{\theta})\delta x-\sin(q_{\theta})\delta y\\
|
||||
q_{y}+\sin(q_{\theta})\delta x+\cos(q_{\theta})\delta y
|
||||
\end{array}\right]
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
which then explains the Jacobian
|
||||
\begin_inset Formula $H$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
\end_layout
|
||||
|
||||
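A quick numerical sanity check of this Jacobian is easy to write, assuming a standard GTSAM install with gtsam/base/numericalDerivative.h available; the free function h below simply restates the tutorial's measurement function.

```cpp
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/Pose2.h>
#include <iostream>

using namespace gtsam;

// Measurement function of the UnaryFactor: project a Pose2 onto its (x, y) position.
Vector2 h(const Pose2& q) { return Vector2(q.x(), q.y()); }

int main() {
  Pose2 q(0.5, -0.2, 0.3);

  // Analytic Jacobian from the text: the 2x3 matrix [R | 0] in tangent space.
  const Rot2& R = q.rotation();
  Matrix23 analytic;
  analytic << R.c(), -R.s(), 0.0,
              R.s(),  R.c(), 0.0;

  // Numerical Jacobian with respect to the exponential-map update q*exp(xi^).
  Matrix23 numerical = numericalDerivative11<Vector2, Pose2>(h, q);

  std::cout << "analytic:\n" << analytic << "\nnumerical:\n" << numerical << "\n";
  return 0;
}
```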
\begin_layout Standard
|
||||
Lie groups are very relevant in the robotics context, and you can read more
|
||||
here:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://github.com/borglab/gtsam/blob/develop/doc/LieGroups.pdf
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://github.com/borglab/gtsam/blob/develop/doc/math.pdf
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
In some cases you want to go even beyond Lie groups to a looser concept,
|
||||
|
||||
\series bold
|
||||
manifolds
|
||||
\series default
|
||||
, because not all unknown variables behave like a group, e.g., the space of
|
||||
3D planes, 2D lines, directions in space, etc.
|
||||
For manifolds we do not always have an exponential map, but we have a retractio
|
||||
n that plays the same role.
|
||||
Some of this is explained here:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://gtsam.org/notes/GTSAM-Concepts.html
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
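As a concrete example of such a manifold-only type, directions in space are represented in GTSAM by Unit3, whose retract and localCoordinates play the role of the exponential and logarithm maps; a minimal sketch, assuming the usual Unit3 interface.

```cpp
#include <gtsam/geometry/Unit3.h>
#include <iostream>

using namespace gtsam;

int main() {
  // A direction in space: a point on the unit sphere, a 2D manifold but not a group.
  Unit3 d(Vector3(1.0, 0.0, 0.0));

  // retract maps a 2-vector in the tangent plane at d back onto the sphere...
  Unit3 d2 = d.retract(Vector2(0.1, -0.05));
  d2.print("retracted direction");

  // ...and localCoordinates is its local inverse.
  Vector2 xi = d.localCoordinates(d2);
  std::cout << "recovered coordinates: " << xi.transpose() << "\n";
  return 0;
}
```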
\begin_layout Subsection
|
||||
|
@ -1669,13 +1850,13 @@ UnaryFactor
|
|||
\series default
|
||||
\emph default
|
||||
instances, and add them to graph.
|
||||
GTSAM uses shared pointers to refer to factors in factor graphs, and
|
||||
GTSAM uses shared pointers to refer to factors, and
|
||||
\series bold
|
||||
\emph on
|
||||
boost::make_shared
|
||||
emplace_shared
|
||||
\series default
|
||||
\emph default
|
||||
is a convenience function to simultaneously construct a class and create
|
||||
is a convenience method to simultaneously construct a class and create
|
||||
a
|
||||
\series bold
|
||||
\emph on
|
||||
|
@ -1683,22 +1864,6 @@ shared_ptr
|
|||
\series default
|
||||
\emph default
|
||||
to it.
|
||||
|
||||
\begin_inset Note Note
|
||||
status collapsed
|
||||
|
||||
\begin_layout Plain Layout
|
||||
and on lines 4-6 we add three newly created
|
||||
\series bold
|
||||
\emph on
|
||||
UnaryFactor
|
||||
\series default
|
||||
\emph default
|
||||
instances to the graph.
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
We obtain the factor graph from Figure
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand vref
|
||||
|
@ -1936,8 +2101,8 @@ reference "fig:CompareMarginals"
|
|||
|
||||
\end_inset
|
||||
|
||||
, where I show the marginals on position as covariance ellipses that contain
|
||||
68.26% of all probability mass.
|
||||
, where I show the marginals on position as 5-sigma covariance ellipses
|
||||
that contain 99.9996% of all probability mass.
|
||||
For the odometry marginals, it is immediately apparent from the figure
|
||||
that (1) the uncertainty on pose keeps growing, and (2) the uncertainty
|
||||
on angular odometry translates into increasing uncertainty on y.
|
||||
|
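The 99.9996% figure follows from the chi-square distribution with two degrees of freedom; as a quick worked check, the mass of a 2D Gaussian inside the k-sigma ellipse is

```latex
P(\text{inside the } k\text{-sigma ellipse}) = 1 - e^{-k^{2}/2},
\qquad k = 5:\quad 1 - e^{-12.5} \approx 0.9999963 \approx 99.9996\%.
```

For comparison, the one-sigma ellipse of a 2D Gaussian contains only about 39.3% of the mass; the 68.26% quoted in the previous edition is the one-sigma figure in 1D.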
@ -1992,6 +2157,7 @@ PoseSLAM
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "DurrantWhyte06ram"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -2190,9 +2356,9 @@ reference "fig:example"
|
|||
\end_inset
|
||||
|
||||
, along with covariance ellipses shown in green.
|
||||
These covariance ellipses in 2D indicate the marginal over position, over
|
||||
all possible orientations, and show the area which contain 68.26% of the
|
||||
probability mass (in 1D this would correspond to one standard deviation).
|
||||
These 5-sigma covariance ellipses in 2D indicate the marginal over position,
|
||||
over all possible orientations, and show the area which contains 99.9996%
|
||||
of the probability mass.
|
||||
The graph shows in a clear manner that the uncertainty on pose
|
||||
\begin_inset Formula $x_{5}$
|
||||
\end_inset
|
||||
|
@ -3076,6 +3242,7 @@ reference "fig:Victoria-1"
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Kaess09ras"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3088,6 +3255,7 @@ key "Kaess09ras"
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Kaess08tro"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3355,6 +3523,7 @@ iSAM
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Kaess08tro,Kaess12ijrr"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3606,6 +3775,7 @@ subgraph preconditioning
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Dellaert10iros,Jian11iccv"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3638,6 +3808,7 @@ Visual Odometry
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Nister04cvpr2"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3661,6 +3832,7 @@ Visual SLAM
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Davison03iccv"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3711,6 +3883,7 @@ Filtering
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Smith87b"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
|
BIN doc/gtsam.pdf (binary file not shown)
396 doc/math.lyx
|
@ -2668,7 +2668,7 @@ reference "eq:pushforward"
|
|||
\begin{eqnarray*}
|
||||
\varphi(a)e^{\yhat} & = & \varphi(ae^{\xhat})\\
|
||||
a^{-1}e^{\yhat} & = & \left(ae^{\xhat}\right)^{-1}\\
|
||||
e^{\yhat} & = & -ae^{\xhat}a^{-1}\\
|
||||
e^{\yhat} & = & ae^{-\xhat}a^{-1}\\
|
||||
\yhat & = & -\Ad a\xhat
|
||||
\end{eqnarray*}
|
||||
|
||||
|
@ -3003,8 +3003,8 @@ between
|
|||
\begin_inset Formula
|
||||
\begin{align}
|
||||
\varphi(g,h)e^{\yhat} & =\varphi(ge^{\xhat},h)\nonumber \\
|
||||
g^{-1}he^{\yhat} & =\left(ge^{\xhat}\right)^{-1}h=-e^{\xhat}g^{-1}h\nonumber \\
|
||||
e^{\yhat} & =-\left(h^{-1}g\right)e^{\xhat}\left(h^{-1}g\right)^{-1}=-\exp\Ad{\left(h^{-1}g\right)}\xhat\nonumber \\
|
||||
g^{-1}he^{\yhat} & =\left(ge^{\xhat}\right)^{-1}h=e^{-\xhat}g^{-1}h\nonumber \\
|
||||
e^{\yhat} & =\left(h^{-1}g\right)e^{-\xhat}\left(h^{-1}g\right)^{-1}=\exp\Ad{\left(h^{-1}g\right)}(-\xhat)\nonumber \\
|
||||
\yhat & =-\Ad{\left(h^{-1}g\right)}\xhat=-\Ad{\varphi\left(h,g\right)}\xhat\label{eq:Dbetween1}
|
||||
\end{align}
|
||||
|
||||
|
@ -5082,6 +5082,394 @@ reference "ex:projection"
|
|||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Subsection
|
||||
Derivative of Adjoint
|
||||
\begin_inset CommandInset label
|
||||
LatexCommand label
|
||||
name "subsec:pose3_adjoint_deriv"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Consider
|
||||
\begin_inset Formula $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$
|
||||
\end_inset
|
||||
|
||||
defined as
|
||||
\begin_inset Formula $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
The derivative is notated (see Section
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand ref
|
||||
reference "sec:Derivatives-of-Actions"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
):
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
First, computing
|
||||
\begin_inset Formula $D_{2}f_{(T,\xi_{b})}(\xi_{b})$
|
||||
\end_inset
|
||||
|
||||
is easy, as its matrix is simply
|
||||
\begin_inset Formula $Ad_{T}$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b})
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
D_{2}f_{(T,\xi_{b})}(\xi_{b})=Ad_{T}
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
We will derive
|
||||
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi)$
|
||||
\end_inset
|
||||
|
||||
using two approaches.
|
||||
In the first, we'll define
|
||||
\begin_inset Formula $g(T,\xi)\triangleq T\exp\hat{\xi}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
From Section
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand ref
|
||||
reference "sec:Derivatives-of-Actions"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
,
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\begin{align*}
|
||||
D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\
|
||||
D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1}
|
||||
\end{align*}
|
||||
|
||||
\end_inset
|
||||
|
||||
Now we can use the definition of the Adjoint representation
|
||||
\begin_inset Formula $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$
|
||||
\end_inset
|
||||
|
||||
(aka conjugation by
|
||||
\begin_inset Formula $g$
|
||||
\end_inset
|
||||
|
||||
) then apply product rule and simplify:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\begin{align*}
|
||||
D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\
|
||||
& =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\
|
||||
& =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\
|
||||
& =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\
|
||||
& =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\
|
||||
& =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\
|
||||
D_{1}f_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}})
|
||||
\end{align*}
|
||||
|
||||
\end_inset
|
||||
|
||||
Where
|
||||
\begin_inset Formula $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$
|
||||
\end_inset
|
||||
|
||||
is the adjoint map of the Lie algebra.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
The second, perhaps more intuitive way of deriving
|
||||
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi_{b})$
|
||||
\end_inset
|
||||
|
||||
, would be to use the fact that the derivative at the origin
|
||||
\begin_inset Formula $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$
|
||||
\end_inset
|
||||
|
||||
by definition of the adjoint
|
||||
\begin_inset Formula $ad_{\xi}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
Then applying the property
|
||||
\begin_inset Formula $Ad_{AB}=Ad_{A}Ad_{B}$
|
||||
\end_inset
|
||||
|
||||
,
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T*I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right)
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
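A hedged numerical check of this closed form, assuming GTSAM's Pose3::AdjointMap, the static Pose3::adjointMap, and numericalDerivative21 with the rotation-first (omega, v) twist ordering; the function name f is only for illustration.

```cpp
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/Pose3.h>
#include <iostream>

using namespace gtsam;

// f(T, xi_b) = Ad_T * xi_b, written with plain 6-vectors (omega first, then v).
Vector6 f(const Pose3& T, const Vector6& xi_b) {
  return T.AdjointMap() * xi_b;
}

int main() {
  Pose3 T(Rot3::RzRyRx(0.1, -0.2, 0.3), Point3(1.0, 2.0, 3.0));
  Vector6 xi_b;
  xi_b << 0.1, 0.2, -0.1, 0.5, -0.3, 0.2;

  // Closed form derived above: D1 f = -Ad_T * ad(xi_b)
  Matrix6 analytic = -T.AdjointMap() * Pose3::adjointMap(xi_b);

  // Numerical derivative with respect to T under the right update T*exp(xi^)
  Matrix6 numerical = numericalDerivative21<Vector6, Pose3, Vector6>(f, T, xi_b);

  std::cout << "max error: " << (analytic - numerical).cwiseAbs().maxCoeff() << "\n";
  return 0;
}
```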
\begin_layout Subsection
|
||||
Derivative of AdjointTranspose
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
The transpose of the Adjoint,
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $Ad_{T}^{T}:\mathfrak{g^{*}\rightarrow g^{*}}$
|
||||
\end_inset
|
||||
|
||||
, is useful as a way to change the reference frame of vectors in the dual
|
||||
space
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
(note the
|
||||
\begin_inset Formula $^{*}$
|
||||
\end_inset
|
||||
|
||||
denoting that we are now in the dual space)
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
.
|
||||
To be more concrete, where
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
as
|
||||
\begin_inset Formula $Ad_{T}\hat{\xi}_{b}$
|
||||
\end_inset
|
||||
|
||||
converts the
|
||||
\emph on
|
||||
twist
|
||||
\emph default
|
||||
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $\xi_{b}$
|
||||
\end_inset
|
||||
|
||||
from the
|
||||
\begin_inset Formula $T$
|
||||
\end_inset
|
||||
|
||||
frame,
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
|
||||
\end_inset
|
||||
|
||||
converts the
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph on
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
wrench
|
||||
\emph default
|
||||
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $\xi_{b}^{*}$
|
||||
\end_inset
|
||||
|
||||
from the
|
||||
\begin_inset Formula $T$
|
||||
\end_inset
|
||||
|
||||
frame
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
.
|
||||
It's difficult to apply a similar derivation as in Section
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand ref
|
||||
reference "subsec:pose3_adjoint_deriv"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
for the derivative of
|
||||
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
|
||||
\end_inset
|
||||
|
||||
because
|
||||
\begin_inset Formula $Ad_{T}^{T}$
|
||||
\end_inset
|
||||
|
||||
cannot be naturally defined as a conjugation, so we resort to crunching
|
||||
through the algebra.
|
||||
The details are omitted but the result is a form that vaguely resembles
|
||||
(but does not exactly match)
|
||||
\begin_inset Formula $ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\begin{align*}
|
||||
\begin{bmatrix}\omega_{T}\\
|
||||
v_{T}
|
||||
\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\
|
||||
D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\
|
||||
\hat{v}_{T} & 0
|
||||
\end{bmatrix}
|
||||
\end{align*}
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
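The stated block matrix can be checked the same way; the sketch below builds Ad_T^T explicitly from AdjointMap() and compares it against the closed form, with skewSymmetric and numericalDerivative21 assumed available as in the previous check and the function name g again illustrative.

```cpp
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/Pose3.h>
#include <iostream>

using namespace gtsam;

// g(T, xi*) = Ad_T^T * xi*, acting on a dual (wrench-like) 6-vector.
Vector6 g(const Pose3& T, const Vector6& xi_star) {
  return T.AdjointMap().transpose() * xi_star;
}

int main() {
  Pose3 T(Rot3::RzRyRx(0.3, 0.1, -0.2), Point3(0.5, -1.0, 2.0));
  Vector6 xi_star;
  xi_star << 0.2, -0.1, 0.3, 1.0, 0.5, -0.4;

  // (omega_T, v_T) := Ad_T^T xi*, as defined in the text
  Vector6 y = g(T, xi_star);
  Matrix3 wT = skewSymmetric(y.head<3>());
  Matrix3 vT = skewSymmetric(y.tail<3>());

  // Closed form stated above: [[wT, vT], [vT, 0]]
  Matrix6 analytic;
  analytic << wT, vT,
              vT, Matrix3::Zero();

  Matrix6 numerical = numericalDerivative21<Vector6, Pose3, Vector6>(g, T, xi_star);
  std::cout << "max error: " << (analytic - numerical).cwiseAbs().maxCoeff() << "\n";
  return 0;
}
```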
\begin_layout Subsection
|
||||
|
@ -6286,7 +6674,7 @@ One representation of a line is through 2 vectors
|
|||
\begin_inset Formula $d$
|
||||
\end_inset
|
||||
|
||||
points from the orgin to the closest point on the line.
|
||||
points from the origin to the closest point on the line.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
|
BIN doc/math.pdf (binary file not shown)
88 doc/refs.bib
|
@ -1,26 +1,72 @@
|
|||
%% This BibTeX bibliography file was created using BibDesk.
|
||||
%% https://bibdesk.sourceforge.io/
|
||||
|
||||
%% Created for Varun Agrawal at 2021-09-27 17:39:09 -0400
|
||||
|
||||
|
||||
%% Saved with string encoding Unicode (UTF-8)
|
||||
|
||||
|
||||
|
||||
@article{Lupton12tro,
|
||||
author = {Lupton, Todd and Sukkarieh, Salah},
|
||||
date-added = {2021-09-27 17:38:56 -0400},
|
||||
date-modified = {2021-09-27 17:39:09 -0400},
|
||||
doi = {10.1109/TRO.2011.2170332},
|
||||
journal = {IEEE Transactions on Robotics},
|
||||
number = {1},
|
||||
pages = {61-76},
|
||||
title = {Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built Environments Without Initial Conditions},
|
||||
volume = {28},
|
||||
year = {2012},
|
||||
Bdsk-Url-1 = {https://doi.org/10.1109/TRO.2011.2170332}}
|
||||
|
||||
@inproceedings{Forster15rss,
|
||||
author = {Christian Forster and Luca Carlone and Frank Dellaert and Davide Scaramuzza},
|
||||
booktitle = {Robotics: Science and Systems},
|
||||
date-added = {2021-09-26 20:44:41 -0400},
|
||||
date-modified = {2021-09-26 20:45:03 -0400},
|
||||
title = {IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation},
|
||||
year = {2015}}
|
||||
|
||||
@article{Iserles00an,
|
||||
title = {Lie-group methods},
|
||||
author = {Iserles, Arieh and Munthe-Kaas, Hans Z and
|
||||
N{\o}rsett, Syvert P and Zanna, Antonella},
|
||||
journal = {Acta Numerica 2000},
|
||||
volume = {9},
|
||||
pages = {215--365},
|
||||
year = {2000},
|
||||
publisher = {Cambridge Univ Press}
|
||||
}
|
||||
author = {Iserles, Arieh and Munthe-Kaas, Hans Z and N{\o}rsett, Syvert P and Zanna, Antonella},
|
||||
journal = {Acta Numerica 2000},
|
||||
pages = {215--365},
|
||||
publisher = {Cambridge Univ Press},
|
||||
title = {Lie-group methods},
|
||||
volume = {9},
|
||||
year = {2000}}
|
||||
|
||||
@book{Murray94book,
|
||||
title = {A mathematical introduction to robotic manipulation},
|
||||
author = {Murray, Richard M and Li, Zexiang and Sastry, S
|
||||
Shankar and Sastry, S Shankara},
|
||||
year = {1994},
|
||||
publisher = {CRC press}
|
||||
}
|
||||
author = {Murray, Richard M and Li, Zexiang and Sastry, S Shankar and Sastry, S Shankara},
|
||||
publisher = {CRC press},
|
||||
title = {A mathematical introduction to robotic manipulation},
|
||||
year = {1994}}
|
||||
|
||||
@book{Spivak65book,
|
||||
title = {Calculus on manifolds},
|
||||
author = {Spivak, Michael},
|
||||
volume = {1},
|
||||
year = {1965},
|
||||
publisher = {WA Benjamin New York}
|
||||
}
|
||||
author = {Spivak, Michael},
|
||||
publisher = {WA Benjamin New York},
|
||||
title = {Calculus on manifolds},
|
||||
volume = {1},
|
||||
year = {1965}}
|
||||
|
||||
@phdthesis{Nikolic16thesis,
|
||||
title={Characterisation, calibration, and design of visual-inertial sensor systems for robot navigation},
|
||||
author={Nikolic, Janosch},
|
||||
year={2016},
|
||||
school={ETH Zurich}
|
||||
}
|
||||
|
||||
@book{Simon06book,
|
||||
title={Optimal state estimation: Kalman, H infinity, and nonlinear approaches},
|
||||
author={Simon, Dan},
|
||||
year={2006},
|
||||
publisher={John Wiley \& Sons}
|
||||
}
|
||||
|
||||
@inproceedings{Trawny05report_IndirectKF,
|
||||
title={Indirect Kalman Filter for 3 D Attitude Estimation},
|
||||
author={Nikolas Trawny and Stergios I. Roumeliotis},
|
||||
year={2005}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,57 @@
|
|||
# Instructions
|
||||
|
||||
Build all docker images, in order:
|
||||
# Images on Docker Hub
|
||||
|
||||
There are 4 images available on https://hub.docker.com/orgs/borglab/repositories:
|
||||
|
||||
- `borglab/ubuntu-boost-tbb`: Ubuntu 18.04 LTS (nicknamed `bionic`) base image, with Boost and TBB installed.
|
||||
- `borglab/ubuntu-gtsam`: GTSAM Release version installed in `/usr/local`.
|
||||
- `borglab/ubuntu-gtsam-python`: installed GTSAM with python wrapper.
|
||||
- `borglab/ubuntu-gtsam-python-vnc`: image with GTSAM+python wrapper that will run a VNC server to connect to.
|
||||
|
||||
# Using the images
|
||||
|
||||
## Just GTSAM
|
||||
|
||||
To start the Docker image, execute
|
||||
```bash
|
||||
docker run -it borglab/ubuntu-gtsam:bionic
|
||||
```
|
||||
after which you will find yourself in a bash shell, in the directory `/usr/src/gtsam/build`.
|
||||
## GTSAM with Python wrapper
|
||||
|
||||
To use GTSAM via the python wrapper, similarly execute
|
||||
```bash
|
||||
docker run -it borglab/ubuntu-gtsam-python:bionic
|
||||
```
|
||||
and then launch `python3`:
|
||||
```bash
|
||||
python3
|
||||
>>> import gtsam
|
||||
>>> gtsam.Pose2(1,2,3)
|
||||
(1, 2, 3)
|
||||
```
|
||||
|
||||
## GTSAM with Python wrapper and VNC
|
||||
|
||||
First, start the docker image, which will run a VNC server on port 5900:
|
||||
```bash
|
||||
docker run -p 5900:5900 borglab/ubuntu-gtsam-python-vnc:bionic
|
||||
```
|
||||
|
||||
Then open a remote VNC X client, for example:
|
||||
|
||||
### Linux
|
||||
```bash
|
||||
sudo apt-get install tigervnc-viewer
|
||||
xtigervncviewer :5900
|
||||
```
|
||||
### Mac
|
||||
The Finder's "Connect to Server..." with `vnc://127.0.0.1` does not work, for some reason. Using the free [VNC Viewer](https://www.realvnc.com/en/connect/download/viewer/), enter `0.0.0.0:5900` as the server.
|
||||
|
||||
# Re-building the images locally
|
||||
|
||||
To build all docker images, in order:
|
||||
|
||||
```bash
|
||||
(cd ubuntu-boost-tbb && ./build.sh)
|
||||
|
@ -9,13 +60,4 @@ Build all docker images, in order:
|
|||
(cd ubuntu-gtsam-python-vnc && ./build.sh)
|
||||
```
|
||||
|
||||
Then launch with:
|
||||
|
||||
docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
|
||||
|
||||
Then open a remote VNC X client, for example:
|
||||
|
||||
sudo apt-get install tigervnc-viewer
|
||||
xtigervncviewer :5900
|
||||
|
||||
|
||||
Note: building GTSAM can take a lot of memory because of the heavy templating. It is advisable to give Docker enough resources, e.g., 8GB, to avoid OOM errors while compiling.
|
|
@ -1,3 +1,3 @@
|
|||
# Build command for Docker image
|
||||
# TODO(dellaert): use docker compose and/or cmake
|
||||
docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic .
|
||||
docker build --no-cache -t borglab/ubuntu-boost-tbb:bionic .
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.
|
||||
|
||||
# Get the base Ubuntu/GTSAM image from Docker Hub
|
||||
FROM dellaert/ubuntu-gtsam-python:bionic
|
||||
FROM borglab/ubuntu-gtsam-python:bionic
|
||||
|
||||
# Things needed to get a python GUI
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# Build command for Docker image
|
||||
# TODO(dellaert): use docker compose and/or cmake
|
||||
# Needs to be run in docker/ubuntu-gtsam-python-vnc directory
|
||||
docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic .
|
||||
docker build -t borglab/ubuntu-gtsam-python-vnc:bionic .
|
||||
|
|
|
@ -2,4 +2,4 @@
|
|||
docker run -it \
|
||||
--workdir="/usr/src/gtsam" \
|
||||
-p 5900:5900 \
|
||||
dellaert/ubuntu-gtsam-python-vnc:bionic
|
||||
borglab/ubuntu-gtsam-python-vnc:bionic
|
|
@ -1,7 +1,7 @@
|
|||
# GTSAM Ubuntu image with Python wrapper support.
|
||||
|
||||
# Get the base Ubuntu/GTSAM image from Docker Hub
|
||||
FROM dellaert/ubuntu-gtsam:bionic
|
||||
FROM borglab/ubuntu-gtsam:bionic
|
||||
|
||||
# Install pip
|
||||
RUN apt-get install -y python3-pip python3-dev
|
||||
|
@ -22,7 +22,9 @@ RUN cmake \
|
|||
..
|
||||
|
||||
# Build again, as ubuntu-gtsam image cleaned
|
||||
RUN make -j4 install && make clean
|
||||
RUN make -j4 install
|
||||
RUN make python-install
|
||||
RUN make clean
|
||||
|
||||
# Needed to run python wrapper:
|
||||
RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
# Build command for Docker image
|
||||
# TODO(dellaert): use docker compose and/or cmake
|
||||
docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic .
|
||||
docker build --no-cache -t borglab/ubuntu-gtsam-python:bionic .
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
|
||||
|
||||
# Get the base Ubuntu image from Docker Hub
|
||||
FROM dellaert/ubuntu-boost-tbb:bionic
|
||||
FROM borglab/ubuntu-boost-tbb:bionic
|
||||
|
||||
# Install git
|
||||
RUN apt-get update && \
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
# Build command for Docker image
|
||||
# TODO(dellaert): use docker compose and/or cmake
|
||||
docker build --no-cache -t dellaert/ubuntu-gtsam:bionic .
|
||||
docker build --no-cache -t borglab/ubuntu-gtsam:bionic .
|
||||
|
|
|
@ -2,4 +2,4 @@ set (excluded_examples
|
|||
elaboratePoint2KalmanFilter.cpp
|
||||
)
|
||||
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;${Boost_PROGRAM_OPTIONS_LIBRARY}")
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;gtsam_unstable;${Boost_PROGRAM_OPTIONS_LIBRARY}")
|
||||
|
|
|
@ -30,8 +30,8 @@ using symbol_shorthand::X;
|
|||
* Unary factor on the unknown pose, resulting from measuring the projection of
|
||||
* a known 3D point in the image
|
||||
*/
|
||||
class ResectioningFactor: public NoiseModelFactor1<Pose3> {
|
||||
typedef NoiseModelFactor1<Pose3> Base;
|
||||
class ResectioningFactor: public NoiseModelFactorN<Pose3> {
|
||||
typedef NoiseModelFactorN<Pose3> Base;
|
||||
|
||||
Cal3_S2::shared_ptr K_; ///< camera's intrinsic parameters
|
||||
Point3 P_; ///< 3D point on the calibration rig
|
||||
|
|
|
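For context, a sketch of what a unary factor looks like on the new variadic base; the class name SimpleResectioningFactor and the exact constructor are illustrative only, and the Jacobian argument follows the Boost-era API used throughout this diff (newer releases pass Matrix* instead).

```cpp
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/NonlinearFactor.h>

using namespace gtsam;

// With a single value type, NoiseModelFactorN<Pose3> behaves exactly like the
// old NoiseModelFactor1<Pose3>; the variadic base simply generalizes to N keys.
class SimpleResectioningFactor : public NoiseModelFactorN<Pose3> {
  typedef NoiseModelFactorN<Pose3> Base;
  Cal3_S2::shared_ptr K_;  ///< camera's intrinsic parameters
  Point3 P_;               ///< known 3D point
  Point2 p_;               ///< its measured image projection

 public:
  SimpleResectioningFactor(const SharedNoiseModel& model, Key key,
                           const Cal3_S2::shared_ptr& K,
                           const Point2& p, const Point3& P)
      : Base(model, key), K_(K), P_(P), p_(p) {}

  Vector evaluateError(const Pose3& pose,
                       boost::optional<Matrix&> H = boost::none) const override {
    // Project the known point into the camera at 'pose' and compare with the measurement.
    PinholeCamera<Cal3_S2> camera(pose, *K_);
    return camera.project(P_, H, boost::none, boost::none) - p_;
  }
};
```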
@ -23,7 +23,8 @@
|
|||
* A row starting with "i" is the first initial position formatted with
|
||||
* N, E, D, qx, qY, qZ, qW, velN, velE, velD
|
||||
* A row starting with "0" is an imu measurement
|
||||
* linAccN, linAccE, linAccD, angVelN, angVelE, angVelD
|
||||
* (body frame - Forward, Right, Down)
|
||||
* linAccX, linAccY, linAccZ, angVelX, angVelY, angVelZ
|
||||
* A row starting with "1" is a gps correction formatted with
|
||||
* N, E, D, qX, qY, qZ, qW
|
||||
* Note that for GPS correction, we're only using the position not the
|
||||
|
@ -59,13 +60,14 @@ namespace po = boost::program_options;
|
|||
|
||||
po::variables_map parseOptions(int argc, char* argv[]) {
|
||||
po::options_description desc;
|
||||
desc.add_options()("help,h", "produce help message")(
|
||||
"data_csv_path", po::value<string>()->default_value("imuAndGPSdata.csv"),
|
||||
"path to the CSV file with the IMU data")(
|
||||
"output_filename",
|
||||
po::value<string>()->default_value("imuFactorExampleResults.csv"),
|
||||
"path to the result file to use")("use_isam", po::bool_switch(),
|
||||
"use ISAM as the optimizer");
|
||||
desc.add_options()("help,h", "produce help message") // help message
|
||||
("data_csv_path", po::value<string>()->default_value("imuAndGPSdata.csv"),
|
||||
"path to the CSV file with the IMU data") // path to the data file
|
||||
("output_filename",
|
||||
po::value<string>()->default_value("imuFactorExampleResults.csv"),
|
||||
"path to the result file to use") // filename to save results to
|
||||
("use_isam", po::bool_switch(),
|
||||
"use ISAM as the optimizer"); // flag for ISAM optimizer
|
||||
|
||||
po::variables_map vm;
|
||||
po::store(po::parse_command_line(argc, argv, desc), vm);
|
||||
|
@ -105,7 +107,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
|||
I_3x3 * 1e-8; // error committed in integrating position from velocities
|
||||
Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
|
||||
Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
|
||||
Matrix66 bias_acc_omega_int =
|
||||
Matrix66 bias_acc_omega_init =
|
||||
I_6x6 * 1e-5; // error in the bias used for preintegration
|
||||
|
||||
auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
|
||||
|
@ -121,7 +123,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
|
|||
// PreintegrationCombinedMeasurements params:
|
||||
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
|
||||
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
|
||||
p->biasAccOmegaInt = bias_acc_omega_int;
|
||||
p->biasAccOmegaInt = bias_acc_omega_init;
|
||||
|
||||
return p;
|
||||
}
|
||||
|
|
|
@ -15,8 +15,8 @@
|
|||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/geometry/CalibratedCamera.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
#include <boost/assign/std/vector.hpp>
|
||||
|
||||
|
@ -26,22 +26,16 @@ using namespace gtsam;
|
|||
|
||||
/* ************************************************************************* */
|
||||
|
||||
void createExampleBALFile(const string& filename, const vector<Point3>& P,
|
||||
const Pose3& pose1, const Pose3& pose2, const Cal3Bundler& K =
|
||||
Cal3Bundler()) {
|
||||
|
||||
void createExampleBALFile(const string& filename, const vector<Point3>& points,
|
||||
const Pose3& pose1, const Pose3& pose2,
|
||||
const Cal3Bundler& K = Cal3Bundler()) {
|
||||
// Class that will gather all data
|
||||
SfmData data;
|
||||
|
||||
// Create two cameras
|
||||
Rot3 aRb = Rot3::Yaw(M_PI_2);
|
||||
Point3 aTb(0.1, 0, 0);
|
||||
Pose3 identity, aPb(aRb, aTb);
|
||||
// Create two cameras and add them to data
|
||||
data.cameras.push_back(SfmCamera(pose1, K));
|
||||
data.cameras.push_back(SfmCamera(pose2, K));
|
||||
|
||||
for(const Point3& p: P) {
|
||||
|
||||
for (const Point3& p : points) {
|
||||
// Create the track
|
||||
SfmTrack track;
|
||||
track.p = p;
|
||||
|
@ -51,7 +45,7 @@ void createExampleBALFile(const string& filename, const vector<Point3>& P,
|
|||
|
||||
// Project points in both cameras
|
||||
for (size_t i = 0; i < 2; i++)
|
||||
track.measurements.push_back(make_pair(i, data.cameras[i].project(p)));
|
||||
track.measurements.push_back(make_pair(i, data.cameras[i].project(p)));
|
||||
|
||||
// Add track to data
|
||||
data.tracks.push_back(track);
|
||||
|
@ -63,49 +57,66 @@ void createExampleBALFile(const string& filename, const vector<Point3>& P,
|
|||
/* ************************************************************************* */
|
||||
|
||||
void create5PointExample1() {
|
||||
|
||||
// Create two cameras poses
|
||||
Rot3 aRb = Rot3::Yaw(M_PI_2);
|
||||
Point3 aTb(0.1, 0, 0);
|
||||
Pose3 pose1, pose2(aRb, aTb);
|
||||
|
||||
// Create test data, we need at least 5 points
|
||||
vector<Point3> P;
|
||||
P += Point3(0, 0, 1), Point3(-0.1, 0, 1), Point3(0.1, 0, 1), //
|
||||
Point3(0, 0.5, 0.5), Point3(0, -0.5, 0.5);
|
||||
vector<Point3> points = {
|
||||
{0, 0, 1}, {-0.1, 0, 1}, {0.1, 0, 1}, {0, 0.5, 0.5}, {0, -0.5, 0.5}};
|
||||
|
||||
// Assumes example is run in ${GTSAM_TOP}/build/examples
|
||||
const string filename = "../../examples/data/5pointExample1.txt";
|
||||
createExampleBALFile(filename, P, pose1, pose2);
|
||||
const string filename = "../../examples/Data/5pointExample1.txt";
|
||||
createExampleBALFile(filename, points, pose1, pose2);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
void create5PointExample2() {
|
||||
|
||||
// Create two cameras poses
|
||||
Rot3 aRb = Rot3::Yaw(M_PI_2);
|
||||
Point3 aTb(10, 0, 0);
|
||||
Pose3 pose1, pose2(aRb, aTb);
|
||||
|
||||
// Create test data, we need at least 5 points
|
||||
vector<Point3> P;
|
||||
P += Point3(0, 0, 100), Point3(-10, 0, 100), Point3(10, 0, 100), //
|
||||
Point3(0, 50, 50), Point3(0, -50, 50), Point3(-20, 0, 80), Point3(20, -50, 80);
|
||||
vector<Point3> points = {{0, 0, 100}, {-10, 0, 100}, {10, 0, 100}, //
|
||||
{0, 50, 50}, {0, -50, 50}, {-20, 0, 80}, //
|
||||
{20, -50, 80}};
|
||||
|
||||
// Assumes example is run in ${GTSAM_TOP}/build/examples
|
||||
const string filename = "../../examples/data/5pointExample2.txt";
|
||||
const string filename = "../../examples/Data/5pointExample2.txt";
|
||||
Cal3Bundler K(500, 0, 0);
|
||||
createExampleBALFile(filename, P, pose1, pose2,K);
|
||||
createExampleBALFile(filename, points, pose1, pose2, K);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
void create18PointExample1() {
|
||||
// Create two cameras poses
|
||||
Rot3 aRb = Rot3::Yaw(M_PI_2);
|
||||
Point3 aTb(0.1, 0, 0);
|
||||
Pose3 pose1, pose2(aRb, aTb);
|
||||
|
||||
// Create test data, we need at least 15 points
|
||||
vector<Point3> points = {
|
||||
{0, 0, 1}, {-0.1, 0, 1}, {0.1, 0, 1}, //
|
||||
{0, 0.5, 0.5}, {0, -0.5, 0.5}, {-1, -0.5, 2}, //
|
||||
{-1, 0.5, 2}, {0.25, -0.5, 1.5}, {0.25, 0.5, 1.5}, //
|
||||
{-0.1, -0.5, 0.5}, {0.1, -0.5, 1}, {0.1, 0.5, 1}, //
|
||||
{-0.1, 0, 0.5}, {-0.1, 0.5, 0.5}, {0, 0, 0.5}, //
|
||||
{0.1, -0.5, 0.5}, {0.1, 0, 0.5}, {0.1, 0.5, 0.5}};
|
||||
|
||||
// Assumes example is run in ${GTSAM_TOP}/build/examples
|
||||
const string filename = "../../examples/Data/18pointExample1.txt";
|
||||
createExampleBALFile(filename, points, pose1, pose2);
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
create5PointExample1();
|
||||
create5PointExample2();
|
||||
create18PointExample1();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
|
|
|
@ -0,0 +1,131 @@
|
|||
2 18 36
|
||||
|
||||
0 0 0 -0
|
||||
1 0 -6.123233995736766344e-18 -0.10000000000000000555
|
||||
0 1 -0.10000000000000000555 -0
|
||||
1 1 -1.2246467991473532688e-17 -0.2000000000000000111
|
||||
0 2 0.10000000000000000555 -0
|
||||
1 2 0 -0
|
||||
0 3 0 -1
|
||||
1 3 1 -0.20000000000000006661
|
||||
0 4 0 1
|
||||
1 4 -1 -0.19999999999999995559
|
||||
0 5 -0.5 0.25
|
||||
1 5 -0.25000000000000005551 -0.55000000000000004441
|
||||
0 6 -0.5 -0.25
|
||||
1 6 0.24999999999999997224 -0.55000000000000004441
|
||||
0 7 0.16666666666666665741 0.33333333333333331483
|
||||
1 7 -0.33333333333333331483 0.10000000000000000555
|
||||
0 8 0.16666666666666665741 -0.33333333333333331483
|
||||
1 8 0.33333333333333331483 0.099999999999999977796
|
||||
0 9 -0.2000000000000000111 1
|
||||
1 9 -1 -0.39999999999999996669
|
||||
0 10 0.10000000000000000555 0.5
|
||||
1 10 -0.5 3.0616169978683830179e-17
|
||||
0 11 0.10000000000000000555 -0.5
|
||||
1 11 0.5 -3.0616169978683830179e-17
|
||||
0 12 -0.2000000000000000111 -0
|
||||
1 12 -2.4492935982947065376e-17 -0.4000000000000000222
|
||||
0 13 -0.2000000000000000111 -1
|
||||
1 13 1 -0.40000000000000007772
|
||||
0 14 0 -0
|
||||
1 14 -1.2246467991473532688e-17 -0.2000000000000000111
|
||||
0 15 0.2000000000000000111 1
|
||||
1 15 -1 6.1232339957367660359e-17
|
||||
0 16 0.2000000000000000111 -0
|
||||
1 16 0 -0
|
||||
0 17 0.2000000000000000111 -1
|
||||
1 17 1 -6.1232339957367660359e-17
|
||||
|
||||
3.141592653589793116
|
||||
0
|
||||
0
|
||||
-0
|
||||
0
|
||||
0
|
||||
1
|
||||
0
|
||||
0
|
||||
|
||||
2.2214414690791830509
|
||||
2.2214414690791826068
|
||||
0
|
||||
-6.123233995736766344e-18
|
||||
-0.10000000000000000555
|
||||
0
|
||||
1
|
||||
0
|
||||
0
|
||||
|
||||
0
|
||||
0
|
||||
1
|
||||
|
||||
-0.10000000000000000555
|
||||
0
|
||||
1
|
||||
|
||||
0.10000000000000000555
|
||||
0
|
||||
1
|
||||
|
||||
0
|
||||
0.5
|
||||
0.5
|
||||
|
||||
0
|
||||
-0.5
|
||||
0.5
|
||||
|
||||
-1
|
||||
-0.5
|
||||
2
|
||||
|
||||
-1
|
||||
0.5
|
||||
2
|
||||
|
||||
0.25
|
||||
-0.5
|
||||
1.5
|
||||
|
||||
0.25
|
||||
0.5
|
||||
1.5
|
||||
|
||||
-0.10000000000000000555
|
||||
-0.5
|
||||
0.5
|
||||
|
||||
0.10000000000000000555
|
||||
-0.5
|
||||
1
|
||||
|
||||
0.10000000000000000555
|
||||
0.5
|
||||
1
|
||||
|
||||
-0.10000000000000000555
|
||||
0
|
||||
0.5
|
||||
|
||||
-0.10000000000000000555
|
||||
0.5
|
||||
0.5
|
||||
|
||||
0
|
||||
0
|
||||
0.5
|
||||
|
||||
0.10000000000000000555
|
||||
-0.5
|
||||
0.5
|
||||
|
||||
0.10000000000000000555
|
||||
0
|
||||
0.5
|
||||
|
||||
0.10000000000000000555
|
||||
0.5
|
||||
0.5
|
||||
|
|
@@ -7,7 +7,7 @@
<count>32</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
-<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
+<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>

@@ -7,7 +7,7 @@
<count>2</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
-<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
+<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>

@@ -53,11 +53,10 @@ int main(int argc, char **argv) {
  // Create solver and eliminate
  Ordering ordering;
  ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
-  DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);

  // solve
-  DiscreteFactor::sharedValues mpe = chordal->optimize();
-  GTSAM_PRINT(*mpe);
+  auto mpe = fg.optimize();
+  GTSAM_PRINT(mpe);

  // We can also build a Bayes tree (directed junction tree).
  // The elimination order above will do fine:

@@ -69,15 +68,15 @@ int main(int argc, char **argv) {
  fg.add(Dyspnea, "0 1");

  // solve again, now with evidence
-  DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
-  DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
-  GTSAM_PRINT(*mpe2);
+  auto mpe2 = fg.optimize();
+  GTSAM_PRINT(mpe2);

  // We can also sample from it
+  DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
  cout << "\n10 samples:" << endl;
  for (size_t i = 0; i < 10; i++) {
-    DiscreteFactor::sharedValues sample = chordal2->sample();
-    GTSAM_PRINT(*sample);
+    auto sample = chordal->sample();
+    GTSAM_PRINT(sample);
  }
  return 0;
}
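The two hunks above drop the shared_ptr-based solve in favour of calling optimize() directly on the factor graph, while sampling still goes through an eliminated Bayes net. A minimal sketch of the new-style calls, with a made-up two-variable graph; the keys and CPT strings are purely illustrative and this is not claimed to match any shipped example:

```cpp
// Sketch only: assumes the discrete API introduced by the new lines above.
#include <gtsam/discrete/DiscreteFactorGraph.h>

using namespace gtsam;

int main() {
  // Two binary variables, illustrative values.
  DiscreteKey A(0, 2), B(1, 2);

  DiscreteFactorGraph fg;
  fg.add(A, "0.4 0.6");
  fg.add(A & B, "0.9 0.1 0.2 0.8");

  // New API: optimize() on the graph returns the MPE assignment by value,
  // no intermediate Bayes net or shared_ptr needed.
  auto mpe = fg.optimize();
  GTSAM_PRINT(mpe);

  // Sampling still requires eliminating into a Bayes net first.
  DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential();
  auto sample = chordal->sample();
  GTSAM_PRINT(sample);
  return 0;
}
```

The same pattern explains the remaining discrete hunks below: every `sharedValues`/`*ptr` pair becomes a plain value.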
@@ -33,11 +33,11 @@ using namespace gtsam;
int main(int argc, char **argv) {
  // Define keys and a print function
  Key C(1), S(2), R(3), W(4);
-  auto print = [=](DiscreteFactor::sharedValues values) {
-    cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
-         << " Sprinkler = " << static_cast<bool>((*values)[S])
-         << " Rain = " << boolalpha << static_cast<bool>((*values)[R])
-         << " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
+  auto print = [=](const DiscreteFactor::Values& values) {
+    cout << boolalpha << "Cloudy = " << static_cast<bool>(values.at(C))
+         << " Sprinkler = " << static_cast<bool>(values.at(S))
+         << " Rain = " << boolalpha << static_cast<bool>(values.at(R))
+         << " WetGrass = " << static_cast<bool>(values.at(W)) << endl;
  };

  // We assume binary state variables

@@ -85,7 +85,7 @@ int main(int argc, char **argv) {
  }

  // "Most Probable Explanation", i.e., configuration with largest value
-  DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
+  auto mpe = graph.optimize();
  cout << "\nMost Probable Explanation (MPE):" << endl;
  print(mpe);

@@ -96,8 +96,7 @@ int main(int argc, char **argv) {
  graph.add(Cloudy, "1 0");

  // solve again, now with evidence
-  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
-  DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
+  auto mpe_with_evidence = graph.optimize();

  cout << "\nMPE given C=0:" << endl;
  print(mpe_with_evidence);

@@ -110,10 +109,11 @@ int main(int argc, char **argv) {
  cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
       << endl;

-  // We can also sample from it
+  // We can also sample from the eliminated graph
+  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  cout << "\n10 samples:" << endl;
  for (size_t i = 0; i < 10; i++) {
-    DiscreteFactor::sharedValues sample = chordal->sample();
+    auto sample = chordal->sample();
    print(sample);
  }
  return 0;

@@ -122,8 +122,7 @@ int main(int argc, char *argv[]) {
  std::cout << "initial error=" << graph.error(initialEstimate) << std::endl;
  std::cout << "final error=" << graph.error(result) << std::endl;

-  std::ofstream os("examples/vio_batch.dot");
-  graph.saveGraph(os, result);
+  graph.saveGraph("examples/vio_batch.dot", result);

  return 0;
}

@@ -59,21 +59,21 @@ int main(int argc, char **argv) {
  // Convert to factor graph
  DiscreteFactorGraph factorGraph(hmm);

+  // Do max-product
+  auto mpe = factorGraph.optimize();
+  GTSAM_PRINT(mpe);

  // Create solver and eliminate
  // This will create a DAG ordered with arrow of time reversed
  DiscreteBayesNet::shared_ptr chordal =
      factorGraph.eliminateSequential(ordering);
  chordal->print("Eliminated");

-  // solve
-  DiscreteFactor::sharedValues mpe = chordal->optimize();
-  GTSAM_PRINT(*mpe);

  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t k = 0; k < 10; k++) {
-    DiscreteFactor::sharedValues sample = chordal->sample();
-    GTSAM_PRINT(*sample);
+    auto sample = chordal->sample();
+    GTSAM_PRINT(sample);
  }

  // Or compute the marginals. This re-eliminates the FG into a Bayes tree

@ -11,21 +11,23 @@
|
|||
|
||||
/**
|
||||
* @file IMUKittiExampleGPS
|
||||
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
|
||||
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
|
||||
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI
|
||||
* VISION BENCHMARK SUITE
|
||||
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ
|
||||
* Electronics
|
||||
*/
|
||||
|
||||
// GTSAM related includes.
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||
#include <gtsam/navigation/GPSFactor.h>
|
||||
#include <gtsam/navigation/ImuFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
#include <gtsam/nonlinear/ISAM2Params.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
#include <cstring>
|
||||
#include <fstream>
|
||||
|
@ -34,35 +36,35 @@
|
|||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
|
||||
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||
|
||||
struct KittiCalibration {
|
||||
double body_ptx;
|
||||
double body_pty;
|
||||
double body_ptz;
|
||||
double body_prx;
|
||||
double body_pry;
|
||||
double body_prz;
|
||||
double accelerometer_sigma;
|
||||
double gyroscope_sigma;
|
||||
double integration_sigma;
|
||||
double accelerometer_bias_sigma;
|
||||
double gyroscope_bias_sigma;
|
||||
double average_delta_t;
|
||||
double body_ptx;
|
||||
double body_pty;
|
||||
double body_ptz;
|
||||
double body_prx;
|
||||
double body_pry;
|
||||
double body_prz;
|
||||
double accelerometer_sigma;
|
||||
double gyroscope_sigma;
|
||||
double integration_sigma;
|
||||
double accelerometer_bias_sigma;
|
||||
double gyroscope_bias_sigma;
|
||||
double average_delta_t;
|
||||
};
|
||||
|
||||
struct ImuMeasurement {
|
||||
double time;
|
||||
double dt;
|
||||
Vector3 accelerometer;
|
||||
Vector3 gyroscope; // omega
|
||||
double time;
|
||||
double dt;
|
||||
Vector3 accelerometer;
|
||||
Vector3 gyroscope; // omega
|
||||
};
|
||||
|
||||
struct GpsMeasurement {
|
||||
double time;
|
||||
Vector3 position; // x,y,z
|
||||
double time;
|
||||
Vector3 position; // x,y,z
|
||||
};
|
||||
|
||||
const string output_filename = "IMUKittiExampleGPSResults.csv";
|
||||
|
@ -70,290 +72,313 @@ const string output_filename = "IMUKittiExampleGPSResults.csv";
|
|||
void loadKittiData(KittiCalibration& kitti_calibration,
|
||||
vector<ImuMeasurement>& imu_measurements,
|
||||
vector<GpsMeasurement>& gps_measurements) {
|
||||
string line;
|
||||
string line;
|
||||
|
||||
// Read IMU metadata and compute relative sensor pose transforms
|
||||
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
|
||||
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
|
||||
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
|
||||
ifstream imu_metadata(imu_metadata_file.c_str());
|
||||
// Read IMU metadata and compute relative sensor pose transforms
|
||||
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma
|
||||
// GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma
|
||||
// AverageDeltaT
|
||||
string imu_metadata_file =
|
||||
findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
|
||||
ifstream imu_metadata(imu_metadata_file.c_str());
|
||||
|
||||
printf("-- Reading sensor metadata\n");
|
||||
printf("-- Reading sensor metadata\n");
|
||||
|
||||
getline(imu_metadata, line, '\n'); // ignore the first line
|
||||
getline(imu_metadata, line, '\n'); // ignore the first line
|
||||
|
||||
// Load Kitti calibration
|
||||
getline(imu_metadata, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
|
||||
&kitti_calibration.body_ptx,
|
||||
&kitti_calibration.body_pty,
|
||||
&kitti_calibration.body_ptz,
|
||||
&kitti_calibration.body_prx,
|
||||
&kitti_calibration.body_pry,
|
||||
&kitti_calibration.body_prz,
|
||||
&kitti_calibration.accelerometer_sigma,
|
||||
&kitti_calibration.gyroscope_sigma,
|
||||
&kitti_calibration.integration_sigma,
|
||||
&kitti_calibration.accelerometer_bias_sigma,
|
||||
&kitti_calibration.gyroscope_bias_sigma,
|
||||
&kitti_calibration.average_delta_t);
|
||||
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
|
||||
kitti_calibration.body_ptx,
|
||||
kitti_calibration.body_pty,
|
||||
kitti_calibration.body_ptz,
|
||||
kitti_calibration.body_prx,
|
||||
kitti_calibration.body_pry,
|
||||
kitti_calibration.body_prz,
|
||||
kitti_calibration.accelerometer_sigma,
|
||||
kitti_calibration.gyroscope_sigma,
|
||||
kitti_calibration.integration_sigma,
|
||||
kitti_calibration.accelerometer_bias_sigma,
|
||||
kitti_calibration.gyroscope_bias_sigma,
|
||||
kitti_calibration.average_delta_t);
|
||||
// Load Kitti calibration
|
||||
getline(imu_metadata, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
|
||||
&kitti_calibration.body_ptx, &kitti_calibration.body_pty,
|
||||
&kitti_calibration.body_ptz, &kitti_calibration.body_prx,
|
||||
&kitti_calibration.body_pry, &kitti_calibration.body_prz,
|
||||
&kitti_calibration.accelerometer_sigma,
|
||||
&kitti_calibration.gyroscope_sigma,
|
||||
&kitti_calibration.integration_sigma,
|
||||
&kitti_calibration.accelerometer_bias_sigma,
|
||||
&kitti_calibration.gyroscope_bias_sigma,
|
||||
&kitti_calibration.average_delta_t);
|
||||
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
|
||||
kitti_calibration.body_ptx, kitti_calibration.body_pty,
|
||||
kitti_calibration.body_ptz, kitti_calibration.body_prx,
|
||||
kitti_calibration.body_pry, kitti_calibration.body_prz,
|
||||
kitti_calibration.accelerometer_sigma,
|
||||
kitti_calibration.gyroscope_sigma, kitti_calibration.integration_sigma,
|
||||
kitti_calibration.accelerometer_bias_sigma,
|
||||
kitti_calibration.gyroscope_bias_sigma,
|
||||
kitti_calibration.average_delta_t);
|
||||
|
||||
// Read IMU data
|
||||
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
|
||||
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
|
||||
printf("-- Reading IMU measurements from file\n");
|
||||
{
|
||||
ifstream imu_data(imu_data_file.c_str());
|
||||
getline(imu_data, line, '\n'); // ignore the first line
|
||||
// Read IMU data
|
||||
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
|
||||
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
|
||||
printf("-- Reading IMU measurements from file\n");
|
||||
{
|
||||
ifstream imu_data(imu_data_file.c_str());
|
||||
getline(imu_data, line, '\n'); // ignore the first line
|
||||
|
||||
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
|
||||
while (!imu_data.eof()) {
|
||||
getline(imu_data, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
|
||||
&time, &dt,
|
||||
&acc_x, &acc_y, &acc_z,
|
||||
&gyro_x, &gyro_y, &gyro_z);
|
||||
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0,
|
||||
gyro_y = 0, gyro_z = 0;
|
||||
while (!imu_data.eof()) {
|
||||
getline(imu_data, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", &time, &dt,
|
||||
&acc_x, &acc_y, &acc_z, &gyro_x, &gyro_y, &gyro_z);
|
||||
|
||||
ImuMeasurement measurement;
|
||||
measurement.time = time;
|
||||
measurement.dt = dt;
|
||||
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
|
||||
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
|
||||
imu_measurements.push_back(measurement);
|
||||
}
|
||||
ImuMeasurement measurement;
|
||||
measurement.time = time;
|
||||
measurement.dt = dt;
|
||||
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
|
||||
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
|
||||
imu_measurements.push_back(measurement);
|
||||
}
|
||||
}
|
||||
|
||||
// Read GPS data
|
||||
// Time,X,Y,Z
|
||||
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
|
||||
printf("-- Reading GPS measurements from file\n");
|
||||
{
|
||||
ifstream gps_data(gps_data_file.c_str());
|
||||
getline(gps_data, line, '\n'); // ignore the first line
|
||||
// Read GPS data
|
||||
// Time,X,Y,Z
|
||||
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
|
||||
printf("-- Reading GPS measurements from file\n");
|
||||
{
|
||||
ifstream gps_data(gps_data_file.c_str());
|
||||
getline(gps_data, line, '\n'); // ignore the first line
|
||||
|
||||
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
|
||||
while (!gps_data.eof()) {
|
||||
getline(gps_data, line, '\n');
|
||||
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
|
||||
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
|
||||
while (!gps_data.eof()) {
|
||||
getline(gps_data, line, '\n');
|
||||
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
|
||||
|
||||
GpsMeasurement measurement;
|
||||
measurement.time = time;
|
||||
measurement.position = Vector3(gps_x, gps_y, gps_z);
|
||||
gps_measurements.push_back(measurement);
|
||||
}
|
||||
GpsMeasurement measurement;
|
||||
measurement.time = time;
|
||||
measurement.position = Vector3(gps_x, gps_y, gps_z);
|
||||
gps_measurements.push_back(measurement);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
KittiCalibration kitti_calibration;
|
||||
vector<ImuMeasurement> imu_measurements;
|
||||
vector<GpsMeasurement> gps_measurements;
|
||||
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
||||
KittiCalibration kitti_calibration;
|
||||
vector<ImuMeasurement> imu_measurements;
|
||||
vector<GpsMeasurement> gps_measurements;
|
||||
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
||||
|
||||
Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
|
||||
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||
.finished();
|
||||
auto body_T_imu = Pose3::Expmap(BodyP);
|
||||
if (!body_T_imu.equals(Pose3(), 1e-5)) {
|
||||
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
|
||||
exit(-1);
|
||||
}
|
||||
Vector6 BodyP =
|
||||
(Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty,
|
||||
kitti_calibration.body_ptz, kitti_calibration.body_prx,
|
||||
kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||
.finished();
|
||||
auto body_T_imu = Pose3::Expmap(BodyP);
|
||||
if (!body_T_imu.equals(Pose3(), 1e-5)) {
|
||||
printf(
|
||||
"Currently only support IMUinBody is identity, i.e. IMU and body frame "
|
||||
"are the same");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
// Configure different variables
|
||||
// double t_offset = gps_measurements[0].time;
|
||||
size_t first_gps_pose = 1;
|
||||
size_t gps_skip = 10; // Skip this many GPS measurements each time
|
||||
double g = 9.8;
|
||||
auto w_coriolis = Vector3::Zero(); // zero vector
|
||||
// Configure different variables
|
||||
// double t_offset = gps_measurements[0].time;
|
||||
size_t first_gps_pose = 1;
|
||||
size_t gps_skip = 10; // Skip this many GPS measurements each time
|
||||
double g = 9.8;
|
||||
auto w_coriolis = Vector3::Zero(); // zero vector
|
||||
|
||||
// Configure noise models
|
||||
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||
Vector3::Constant(1.0/0.07))
|
||||
.finished());
|
||||
// Configure noise models
|
||||
auto noise_model_gps = noiseModel::Diagonal::Precisions(
|
||||
(Vector6() << Vector3::Constant(0), Vector3::Constant(1.0 / 0.07))
|
||||
.finished());
|
||||
|
||||
// Set initial conditions for the estimated trajectory
|
||||
// initial pose is the reference frame (navigation frame)
|
||||
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||
// the vehicle is stationary at the beginning at position 0,0,0
|
||||
Vector3 current_velocity_global = Vector3::Zero();
|
||||
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
||||
// Set initial conditions for the estimated trajectory
|
||||
// initial pose is the reference frame (navigation frame)
|
||||
auto current_pose_global =
|
||||
Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||
// the vehicle is stationary at the beginning at position 0,0,0
|
||||
Vector3 current_velocity_global = Vector3::Zero();
|
||||
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
||||
|
||||
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||
Vector3::Constant(1.0))
|
||||
.finished());
|
||||
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
||||
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
|
||||
Vector3::Constant(5.00e-05))
|
||||
.finished());
|
||||
auto sigma_init_x = noiseModel::Diagonal::Precisions(
|
||||
(Vector6() << Vector3::Constant(0), Vector3::Constant(1.0)).finished());
|
||||
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
||||
auto sigma_init_b = noiseModel::Diagonal::Sigmas(
|
||||
(Vector6() << Vector3::Constant(0.100), Vector3::Constant(5.00e-05))
|
||||
.finished());
|
||||
|
||||
// Set IMU preintegration parameters
|
||||
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
|
||||
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
|
||||
// error committed in integrating position from velocities
|
||||
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
|
||||
// Set IMU preintegration parameters
|
||||
Matrix33 measured_acc_cov =
|
||||
I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
|
||||
Matrix33 measured_omega_cov =
|
||||
I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
|
||||
// error committed in integrating position from velocities
|
||||
Matrix33 integration_error_cov =
|
||||
I_3x3 * pow(kitti_calibration.integration_sigma, 2);
|
||||
|
||||
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
|
||||
imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
|
||||
imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous
|
||||
imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
|
||||
imu_params->omegaCoriolis = w_coriolis;
|
||||
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
|
||||
imu_params->accelerometerCovariance =
|
||||
measured_acc_cov; // acc white noise in continuous
|
||||
imu_params->integrationCovariance =
|
||||
integration_error_cov; // integration uncertainty continuous
|
||||
imu_params->gyroscopeCovariance =
|
||||
measured_omega_cov; // gyro white noise in continuous
|
||||
imu_params->omegaCoriolis = w_coriolis;
|
||||
|
||||
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr;
|
||||
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement =
|
||||
nullptr;
|
||||
|
||||
// Set ISAM2 parameters and create ISAM2 solver object
|
||||
ISAM2Params isam_params;
|
||||
isam_params.factorization = ISAM2Params::CHOLESKY;
|
||||
isam_params.relinearizeSkip = 10;
|
||||
// Set ISAM2 parameters and create ISAM2 solver object
|
||||
ISAM2Params isam_params;
|
||||
isam_params.factorization = ISAM2Params::CHOLESKY;
|
||||
isam_params.relinearizeSkip = 10;
|
||||
|
||||
ISAM2 isam(isam_params);
|
||||
ISAM2 isam(isam_params);
|
||||
|
||||
// Create the factor graph and values object that will store new factors and values to add to the incremental graph
|
||||
NonlinearFactorGraph new_factors;
|
||||
Values new_values; // values storing the initial estimates of new nodes in the factor graph
|
||||
// Create the factor graph and values object that will store new factors and
|
||||
// values to add to the incremental graph
|
||||
NonlinearFactorGraph new_factors;
|
||||
Values new_values; // values storing the initial estimates of new nodes in
|
||||
// the factor graph
|
||||
|
||||
/// Main loop:
|
||||
/// (1) we read the measurements
|
||||
/// (2) we create the corresponding factors in the graph
|
||||
/// (3) we solve the graph to obtain an optimal estimate of the robot trajectory
|
||||
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
|
||||
size_t j = 0;
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
// At each non-IMU measurement we initialize a new node in the graph
|
||||
auto current_pose_key = X(i);
|
||||
auto current_vel_key = V(i);
|
||||
auto current_bias_key = B(i);
|
||||
double t = gps_measurements[i].time;
|
||||
/// Main loop:
|
||||
/// (1) we read the measurements
|
||||
/// (2) we create the corresponding factors in the graph
|
||||
/// (3) we solve the graph to obtain an optimal estimate of the robot trajectory
|
||||
printf(
|
||||
"-- Starting main loop: inference is performed at each time step, but we "
|
||||
"plot trajectory every 10 steps\n");
|
||||
size_t j = 0;
|
||||
size_t included_imu_measurement_count = 0;
|
||||
|
||||
if (i == first_gps_pose) {
|
||||
// Create initial estimate and prior on initial pose, velocity, and biases
|
||||
new_values.insert(current_pose_key, current_pose_global);
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x);
|
||||
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
|
||||
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
|
||||
} else {
|
||||
double t_previous = gps_measurements[i-1].time;
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
// At each non-IMU measurement we initialize a new node in the graph
|
||||
auto current_pose_key = X(i);
|
||||
auto current_vel_key = V(i);
|
||||
auto current_bias_key = B(i);
|
||||
double t = gps_measurements[i].time;
|
||||
|
||||
// Summarize IMU data between the previous GPS measurement and now
|
||||
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias);
|
||||
static size_t included_imu_measurement_count = 0;
|
||||
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
|
||||
if (imu_measurements[j].time >= t_previous) {
|
||||
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
|
||||
imu_measurements[j].gyroscope,
|
||||
imu_measurements[j].dt);
|
||||
included_imu_measurement_count++;
|
||||
}
|
||||
j++;
|
||||
}
|
||||
if (i == first_gps_pose) {
|
||||
// Create initial estimate and prior on initial pose, velocity, and biases
|
||||
new_values.insert(current_pose_key, current_pose_global);
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(
|
||||
current_pose_key, current_pose_global, sigma_init_x);
|
||||
new_factors.emplace_shared<PriorFactor<Vector3>>(
|
||||
current_vel_key, current_velocity_global, sigma_init_v);
|
||||
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(
|
||||
current_bias_key, current_bias, sigma_init_b);
|
||||
} else {
|
||||
double t_previous = gps_measurements[i - 1].time;
|
||||
|
||||
// Create IMU factor
|
||||
auto previous_pose_key = X(i-1);
|
||||
auto previous_vel_key = V(i-1);
|
||||
auto previous_bias_key = B(i-1);
|
||||
// Summarize IMU data between the previous GPS measurement and now
|
||||
current_summarized_measurement =
|
||||
std::make_shared<PreintegratedImuMeasurements>(imu_params,
|
||||
current_bias);
|
||||
|
||||
new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key,
|
||||
current_pose_key, current_vel_key,
|
||||
previous_bias_key, *current_summarized_measurement);
|
||||
|
||||
// Bias evolution as given in the IMU metadata
|
||||
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
|
||||
.finished());
|
||||
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
|
||||
current_bias_key,
|
||||
imuBias::ConstantBias(),
|
||||
sigma_between_b);
|
||||
|
||||
// Create GPS factor
|
||||
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
|
||||
if ((i % gps_skip) == 0) {
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
|
||||
new_values.insert(current_pose_key, gps_pose);
|
||||
|
||||
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
|
||||
cout << gps_pose.translation();
|
||||
printf("\n\n");
|
||||
} else {
|
||||
new_values.insert(current_pose_key, current_pose_global);
|
||||
}
|
||||
|
||||
// Add initial values for velocity and bias based on the previous estimates
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
|
||||
// Update solver
|
||||
// =======================================================================
|
||||
// We accumulate 2*GPSskip GPS measurements before updating the solver at
|
||||
// first so that the heading becomes observable.
|
||||
if (i > (first_gps_pose + 2*gps_skip)) {
|
||||
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
|
||||
new_factors.print();
|
||||
|
||||
isam.update(new_factors, new_values);
|
||||
|
||||
// Reset the newFactors and newValues list
|
||||
new_factors.resize(0);
|
||||
new_values.clear();
|
||||
|
||||
// Extract the result/current estimates
|
||||
Values result = isam.calculateEstimate();
|
||||
|
||||
current_pose_global = result.at<Pose3>(current_pose_key);
|
||||
current_velocity_global = result.at<Vector3>(current_vel_key);
|
||||
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
|
||||
|
||||
printf("\n################ POSE AT TIME %lf ################\n", t);
|
||||
current_pose_global.print();
|
||||
printf("\n\n");
|
||||
}
|
||||
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
|
||||
if (imu_measurements[j].time >= t_previous) {
|
||||
current_summarized_measurement->integrateMeasurement(
|
||||
imu_measurements[j].accelerometer, imu_measurements[j].gyroscope,
|
||||
imu_measurements[j].dt);
|
||||
included_imu_measurement_count++;
|
||||
}
|
||||
j++;
|
||||
}
|
||||
|
||||
// Create IMU factor
|
||||
auto previous_pose_key = X(i - 1);
|
||||
auto previous_vel_key = V(i - 1);
|
||||
auto previous_bias_key = B(i - 1);
|
||||
|
||||
new_factors.emplace_shared<ImuFactor>(
|
||||
previous_pose_key, previous_vel_key, current_pose_key,
|
||||
current_vel_key, previous_bias_key, *current_summarized_measurement);
|
||||
|
||||
// Bias evolution as given in the IMU metadata
|
||||
auto sigma_between_b = noiseModel::Diagonal::Sigmas(
|
||||
(Vector6() << Vector3::Constant(
|
||||
sqrt(included_imu_measurement_count) *
|
||||
kitti_calibration.accelerometer_bias_sigma),
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) *
|
||||
kitti_calibration.gyroscope_bias_sigma))
|
||||
.finished());
|
||||
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(
|
||||
previous_bias_key, current_bias_key, imuBias::ConstantBias(),
|
||||
sigma_between_b);
|
||||
|
||||
// Create GPS factor
|
||||
auto gps_pose =
|
||||
Pose3(current_pose_global.rotation(), gps_measurements[i].position);
|
||||
if ((i % gps_skip) == 0) {
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(
|
||||
current_pose_key, gps_pose, noise_model_gps);
|
||||
new_values.insert(current_pose_key, gps_pose);
|
||||
|
||||
printf("############ POSE INCLUDED AT TIME %.6lf ############\n",
|
||||
t);
|
||||
cout << gps_pose.translation();
|
||||
printf("\n\n");
|
||||
} else {
|
||||
new_values.insert(current_pose_key, current_pose_global);
|
||||
}
|
||||
|
||||
// Add initial values for velocity and bias based on the previous
|
||||
// estimates
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
|
||||
// Update solver
|
||||
// =======================================================================
|
||||
// We accumulate 2*GPSskip GPS measurements before updating the solver at
|
||||
// first so that the heading becomes observable.
|
||||
if (i > (first_gps_pose + 2 * gps_skip)) {
|
||||
printf("############ NEW FACTORS AT TIME %.6lf ############\n",
|
||||
t);
|
||||
new_factors.print();
|
||||
|
||||
isam.update(new_factors, new_values);
|
||||
|
||||
// Reset the newFactors and newValues list
|
||||
new_factors.resize(0);
|
||||
new_values.clear();
|
||||
|
||||
// Extract the result/current estimates
|
||||
Values result = isam.calculateEstimate();
|
||||
|
||||
current_pose_global = result.at<Pose3>(current_pose_key);
|
||||
current_velocity_global = result.at<Vector3>(current_vel_key);
|
||||
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
|
||||
|
||||
printf("\n############ POSE AT TIME %lf ############\n", t);
|
||||
current_pose_global.print();
|
||||
printf("\n\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save results to file
|
||||
printf("\nWriting results to file...\n");
|
||||
FILE* fp_out = fopen(output_filename.c_str(), "w+");
|
||||
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
|
||||
// Save results to file
|
||||
printf("\nWriting results to file...\n");
|
||||
FILE* fp_out = fopen(output_filename.c_str(), "w+");
|
||||
fprintf(fp_out,
|
||||
"#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
|
||||
|
||||
Values result = isam.calculateEstimate();
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
auto pose_key = X(i);
|
||||
auto vel_key = V(i);
|
||||
auto bias_key = B(i);
|
||||
Values result = isam.calculateEstimate();
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
auto pose_key = X(i);
|
||||
auto vel_key = V(i);
|
||||
auto bias_key = B(i);
|
||||
|
||||
auto pose = result.at<Pose3>(pose_key);
|
||||
auto velocity = result.at<Vector3>(vel_key);
|
||||
auto bias = result.at<imuBias::ConstantBias>(bias_key);
|
||||
auto pose = result.at<Pose3>(pose_key);
|
||||
auto velocity = result.at<Vector3>(vel_key);
|
||||
auto bias = result.at<imuBias::ConstantBias>(bias_key);
|
||||
|
||||
auto pose_quat = pose.rotation().toQuaternion();
|
||||
auto gps = gps_measurements[i].position;
|
||||
auto pose_quat = pose.rotation().toQuaternion();
|
||||
auto gps = gps_measurements[i].position;
|
||||
|
||||
cout << "State at #" << i << endl;
|
||||
cout << "Pose:" << endl << pose << endl;
|
||||
cout << "Velocity:" << endl << velocity << endl;
|
||||
cout << "Bias:" << endl << bias << endl;
|
||||
cout << "State at #" << i << endl;
|
||||
cout << "Pose:" << endl << pose << endl;
|
||||
cout << "Velocity:" << endl << velocity << endl;
|
||||
cout << "Bias:" << endl << bias << endl;
|
||||
|
||||
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||
gps_measurements[i].time,
|
||||
pose.x(), pose.y(), pose.z(),
|
||||
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
|
||||
gps(0), gps(1), gps(2));
|
||||
}
|
||||
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||
gps_measurements[i].time, pose.x(), pose.y(), pose.z(),
|
||||
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), gps(0),
|
||||
gps(1), gps(2));
|
||||
}
|
||||
|
||||
fclose(fp_out);
|
||||
fclose(fp_out);
|
||||
}
|
||||
|
|
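The reformatted KITTI example above repeats one core pattern: accumulate all IMU samples between two GPS times into a PreintegratedImuMeasurements object, then add a single ImuFactor tying the two poses, velocities, and the previous bias. A stripped-down sketch of just that step; the noise magnitudes and the single synthetic measurement are invented for illustration and are not the KITTI calibration values:

```cpp
// Sketch of the preintegration pattern used above; numbers are illustrative.
#include <gtsam/inference/Symbol.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;
using symbol_shorthand::B;
using symbol_shorthand::V;
using symbol_shorthand::X;

NonlinearFactorGraph makeImuFactorBetween(size_t i, double dt,
                                          const imuBias::ConstantBias& bias) {
  // Shared preintegration parameters; MakeSharedU(g) assumes a Z-up frame.
  auto params = PreintegratedImuMeasurements::Params::MakeSharedU(9.8);
  params->accelerometerCovariance = I_3x3 * 1e-3;  // illustrative
  params->gyroscopeCovariance = I_3x3 * 1e-4;      // illustrative
  params->integrationCovariance = I_3x3 * 1e-8;    // illustrative

  // Accumulate the IMU samples that fall between pose i-1 and pose i.
  PreintegratedImuMeasurements pim(params, bias);
  pim.integrateMeasurement(Vector3(0.1, 0.0, 9.8),   // accelerometer
                           Vector3(0.0, 0.0, 0.01),  // gyroscope
                           dt);

  // One ImuFactor ties the two poses, two velocities, and the previous bias.
  NonlinearFactorGraph graph;
  graph.emplace_shared<ImuFactor>(X(i - 1), V(i - 1), X(i), V(i), B(i - 1), pim);
  return graph;
}
```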
|
@@ -25,7 +25,8 @@
* A row starting with "i" is the first initial position formatted with
* N, E, D, qx, qY, qZ, qW, velN, velE, velD
* A row starting with "0" is an imu measurement
-* linAccN, linAccE, linAccD, angVelN, angVelE, angVelD
+* (body frame - Forward, Right, Down)
+* linAccX, linAccY, linAccZ, angVelX, angVelY, angVelZ
* A row starting with "1" is a gps correction formatted with
* N, E, D, qX, qY, qZ, qW
* Note that for GPS correction, we're only using the position not the

@@ -93,7 +94,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
      I_3x3 * 1e-8; // error committed in integrating position from velocities
  Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
  Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
-  Matrix66 bias_acc_omega_int =
+  Matrix66 bias_acc_omega_init =
      I_6x6 * 1e-5; // error in the bias used for preintegration

  auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);

@@ -109,7 +110,7 @@ boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() {
  // PreintegrationCombinedMeasurements params:
  p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
  p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
-  p->biasAccOmegaInt = bias_acc_omega_int;
+  p->biasAccOmegaInt = bias_acc_omega_init;

  return p;
}

@@ -266,7 +267,6 @@ int main(int argc, char* argv[]) {

  if (use_isam) {
    isam2->update(*graph, initial_values);
-    isam2->update();
    result = isam2->calculateEstimate();

    // reset the graph

@@ -62,10 +62,10 @@ using namespace gtsam;
//
// The factor will be a unary factor, affecting only a single system variable. It will
// also use a standard Gaussian noise model. Hence, we will derive our new factor from
-// the NoiseModelFactor1.
+// the NoiseModelFactorN.
#include <gtsam/nonlinear/NonlinearFactor.h>

-class UnaryFactor: public NoiseModelFactor1<Pose2> {
+class UnaryFactor: public NoiseModelFactorN<Pose2> {
  // The factor will hold a measurement consisting of an (X,Y) location
  // We could do this with a Point2 but here we just use two doubles
  double mx_, my_;

@@ -76,11 +76,11 @@ class UnaryFactor: public NoiseModelFactor1<Pose2> {

  // The constructor requires the variable key, the (X, Y) measurement value, and the noise model
  UnaryFactor(Key j, double x, double y, const SharedNoiseModel& model):
-    NoiseModelFactor1<Pose2>(model, j), mx_(x), my_(y) {}
+    NoiseModelFactorN<Pose2>(model, j), mx_(x), my_(y) {}

  ~UnaryFactor() override {}

-  // Using the NoiseModelFactor1 base class there are two functions that must be overridden.
+  // Using the NoiseModelFactorN base class there are two functions that must be overridden.
  // The first is the 'evaluateError' function. This function implements the desired measurement
  // function, returning a vector of errors when evaluated at the provided variable value. It
  // must also calculate the Jacobians for this measurement function, if requested.

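The comments above say `evaluateError` must be overridden, but the hunk itself does not show it. A hedged sketch of what the migrated factor might look like, mirroring the GPS-like measurement model of the surrounding example; note that the exact optional-Jacobian argument type differs across GTSAM versions, so the boost::optional form here is an assumption matching the boost-based code in the rest of this diff:

```cpp
// Sketch of the migrated unary factor; only the base class name changes from
// NoiseModelFactor1 to NoiseModelFactorN, the error function stays the same.
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactor.h>

using namespace gtsam;

class UnaryFactorSketch : public NoiseModelFactorN<Pose2> {
  double mx_, my_;  // (X, Y) measurement

 public:
  UnaryFactorSketch(Key j, double x, double y, const SharedNoiseModel& model)
      : NoiseModelFactorN<Pose2>(model, j), mx_(x), my_(y) {}

  // Returns the 2D position error and, if requested, its 2x3 Jacobian with
  // respect to the Pose2 tangent space (dx, dy, dtheta).
  Vector evaluateError(const Pose2& q,
                       boost::optional<Matrix&> H = boost::none) const override {
    const Rot2& R = q.rotation();
    if (H) *H = (Matrix(2, 3) << R.c(), -R.s(), 0.0,
                                 R.s(),  R.c(), 0.0).finished();
    return (Vector(2) << q.x() - mx_, q.y() - my_).finished();
  }
};
```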
@@ -60,11 +60,10 @@ int main(int argc, char** argv) {

  // save factor graph as graphviz dot file
  // Render to PDF using "fdp Pose2SLAMExample.dot -Tpdf > graph.pdf"
-  ofstream os("Pose2SLAMExample.dot");
-  graph.saveGraph(os, result);
+  graph.saveGraph("Pose2SLAMExample.dot", result);

  // Also print out to console
-  graph.saveGraph(cout, result);
+  graph.dot(cout, result);

  return 0;
}

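Both this hunk and the vio_batch one earlier switch from streaming into an ofstream to the filename and stream convenience overloads. A tiny sketch of the two call styles, assuming a graph and a result that already exist; the file name is illustrative:

```cpp
// Assumes `graph` and `result` are built elsewhere, as in the example above.
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <iostream>

void writeDot(const gtsam::NonlinearFactorGraph& graph,
              const gtsam::Values& result) {
  // Write Graphviz output straight to a file...
  graph.saveGraph("Pose2SLAMExample.dot", result);
  // ...or to any std::ostream, e.g. the console.
  graph.dot(std::cout, result);
}
```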
@@ -71,11 +71,11 @@ int main(const int argc, const char *argv[]) {
  if (pose3Between){
    Key key1, key2;
    if(add){
-      key1 = pose3Between->key1() + firstKey;
-      key2 = pose3Between->key2() + firstKey;
+      key1 = pose3Between->key<1>() + firstKey;
+      key2 = pose3Between->key<2>() + firstKey;
    }else{
-      key1 = pose3Between->key1() - firstKey;
-      key2 = pose3Between->key2() - firstKey;
+      key1 = pose3Between->key<1>() - firstKey;
+      key2 = pose3Between->key<2>() - firstKey;
    }
    NonlinearFactor::shared_ptr simpleFactor(
        new BetweenFactor<Pose3>(key1, key2, pose3Between->measured(), pose3Between->noiseModel()));

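The per-slot accessors key1() and key2() become the templated key<I>() provided by the NoiseModelFactorN base that the rest of this merge migrates to. A hedged sketch of reading both keys back off a BetweenFactor; the factor construction itself is illustrative:

```cpp
// Illustrative only: builds a trivial BetweenFactor and reads its keys back
// with the 1-based templated accessor used in the hunk above. key<I>()
// assumes the newer NoiseModelFactorN base class.
#include <gtsam/geometry/Pose3.h>
#include <gtsam/slam/BetweenFactor.h>
#include <utility>

using namespace gtsam;

std::pair<Key, Key> betweenKeys() {
  auto noise = noiseModel::Isotropic::Sigma(6, 0.1);
  BetweenFactor<Pose3> between(1, 2, Pose3(), noise);
  return {between.key<1>(), between.key<2>()};  // replaces key1()/key2()
}
```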
@@ -33,7 +33,6 @@ The following examples illustrate some concepts from Georgia Tech's research pap
## 2D Pose SLAM

* **LocalizationExample.cpp**: modeling robot motion
-* **LocalizationExample2.cpp**: example with GPS like measurements
* **Pose2SLAMExample**: A 2D Pose SLAM example using the predefined typedefs in gtsam/slam/pose2SLAM.h
* **Pose2SLAMExample_advanced**: same, but uses an Optimizer object
* **Pose2SLAMwSPCG**: solves a simple 3 by 3 grid Pose2 SLAM problem using the easy SPCG interface

@@ -51,13 +50,13 @@ The directory **vSLAMexample** includes 2 simple examples using GTSAM:

See the separate README file there.

-##Undirected Graphical Models (UGM)
+## Undirected Graphical Models (UGM)
The best representation for a Markov Random Field is a factor graph :-) This is illustrated with some discrete examples from the UGM MATLAB toolbox, which
can be found at <http://www.di.ens.fr/~mschmidt/Software/UGM>

-##Building and Running
-To build, cd into the directory and do:
+## Building and Running
+To build, cd into the top-level gtsam directory and do:

```
mkdir build

@ -10,62 +10,81 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file RangeISAMExample_plaza1.cpp
|
||||
* @file RangeISAMExample_plaza2.cpp
|
||||
* @brief A 2D Range SLAM example
|
||||
* @date June 20, 2013
|
||||
* @author FRank Dellaert
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
// Both relative poses and recovered trajectory poses will be stored as Pose2 objects
|
||||
// Both relative poses and recovered trajectory poses will be stored as Pose2.
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
using gtsam::Pose2;
|
||||
|
||||
// Each variable in the system (poses and landmarks) must be identified with a unique key.
|
||||
// We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
|
||||
// Here we will use Symbols
|
||||
// gtsam::Vectors are dynamic Eigen vectors, Vector3 is statically sized.
|
||||
#include <gtsam/base/Vector.h>
|
||||
using gtsam::Vector;
|
||||
using gtsam::Vector3;
|
||||
|
||||
// Unknown landmarks are of type Point2 (which is just a 2D Eigen vector).
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
using gtsam::Point2;
|
||||
|
||||
// Each variable in the system (poses and landmarks) must be identified with a
|
||||
// unique key. We can either use simple integer keys (1, 2, 3, ...) or symbols
|
||||
// (X1, X2, L1). Here we will use Symbols.
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
using gtsam::Symbol;
|
||||
|
||||
// We want to use iSAM2 to solve the range-SLAM problem incrementally
|
||||
// We want to use iSAM2 to solve the range-SLAM problem incrementally.
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
|
||||
// iSAM2 requires as input a set set of new factors to be added stored in a factor graph,
|
||||
// and initial guesses for any new variables used in the added factors
|
||||
// iSAM2 requires as input a set of new factors to be added, stored in a
|
||||
// factor graph, and initial guesses for any new variables in the added factors.
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
|
||||
// We will use a non-liear solver to batch-inituialize from the first 150 frames
|
||||
// We will use a non-linear solver to batch-initialize from the first 150 frames
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
|
||||
// In GTSAM, measurement functions are represented as 'factors'. Several common factors
|
||||
// have been provided with the library for solving robotics SLAM problems.
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
// Measurement functions are represented as 'factors'. Several common factors
|
||||
// have been provided with the library for solving robotics SLAM problems:
|
||||
#include <gtsam/sam/RangeFactor.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
// Standard headers, added last, so we know headers above work on their own
|
||||
// Timing, with functions below, provides nice facilities to benchmark.
|
||||
#include <gtsam/base/timing.h>
|
||||
using gtsam::tictoc_print_;
|
||||
|
||||
// Standard headers, added last, so we know headers above work on their own.
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
namespace NM = gtsam::noiseModel;
|
||||
|
||||
// data available at http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/
|
||||
// Datafile format (from http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/log.html)
|
||||
// Data is second UWB ranging dataset, B2 or "plaza 2", from
|
||||
// "Navigating with Ranging Radios: Five Data Sets with Ground Truth"
|
||||
// by Joseph Djugash, Bradley Hamner, and Stephan Roth
|
||||
// https://www.ri.cmu.edu/pub_files/2009/9/Final_5datasetsRangingRadios.pdf
|
||||
|
||||
// load the odometry
|
||||
// DR: Odometry Input (delta distance traveled and delta heading change)
|
||||
// Time (sec) Delta Dist. Trav. (m) Delta Heading (rad)
|
||||
typedef pair<double, Pose2> TimedOdometry;
|
||||
list<TimedOdometry> readOdometry() {
|
||||
list<TimedOdometry> odometryList;
|
||||
string data_file = findExampleDataFile("Plaza2_DR.txt");
|
||||
ifstream is(data_file.c_str());
|
||||
// Time (sec) Delta Distance Traveled (m) Delta Heading (rad)
|
||||
using TimedOdometry = std::pair<double, Pose2>;
|
||||
std::list<TimedOdometry> readOdometry() {
|
||||
std::list<TimedOdometry> odometryList;
|
||||
std::string data_file = gtsam::findExampleDataFile("Plaza2_DR.txt");
|
||||
std::ifstream is(data_file.c_str());
|
||||
|
||||
while (is) {
|
||||
double t, distance_traveled, delta_heading;
|
||||
is >> t >> distance_traveled >> delta_heading;
|
||||
odometryList.push_back(
|
||||
TimedOdometry(t, Pose2(distance_traveled, 0, delta_heading)));
|
||||
odometryList.emplace_back(t, Pose2(distance_traveled, 0, delta_heading));
|
||||
}
|
||||
is.clear(); /* clears the end-of-file and error flags */
|
||||
return odometryList;
|
||||
|
@ -73,90 +92,85 @@ list<TimedOdometry> readOdometry() {
|
|||
|
||||
// load the ranges from TD
|
||||
// Time (sec) Sender / Antenna ID Receiver Node ID Range (m)
|
||||
typedef boost::tuple<double, size_t, double> RangeTriple;
|
||||
vector<RangeTriple> readTriples() {
|
||||
vector<RangeTriple> triples;
|
||||
string data_file = findExampleDataFile("Plaza2_TD.txt");
|
||||
ifstream is(data_file.c_str());
|
||||
using RangeTriple = boost::tuple<double, size_t, double>;
|
||||
std::vector<RangeTriple> readTriples() {
|
||||
std::vector<RangeTriple> triples;
|
||||
std::string data_file = gtsam::findExampleDataFile("Plaza2_TD.txt");
|
||||
std::ifstream is(data_file.c_str());
|
||||
|
||||
while (is) {
|
||||
double t, sender, range;
|
||||
size_t receiver;
|
||||
double t, range, sender, receiver;
|
||||
is >> t >> sender >> receiver >> range;
|
||||
triples.push_back(RangeTriple(t, receiver, range));
|
||||
triples.emplace_back(t, receiver, range);
|
||||
}
|
||||
is.clear(); /* clears the end-of-file and error flags */
|
||||
return triples;
|
||||
}
|
||||
|
||||
// main
|
||||
int main (int argc, char** argv) {
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
// load Plaza2 data
|
||||
list<TimedOdometry> odometry = readOdometry();
|
||||
// size_t M = odometry.size();
|
||||
std::list<TimedOdometry> odometry = readOdometry();
|
||||
size_t M = odometry.size();
|
||||
std::cout << "Read " << M << " odometry entries." << std::endl;
|
||||
|
||||
vector<RangeTriple> triples = readTriples();
|
||||
std::vector<RangeTriple> triples = readTriples();
|
||||
size_t K = triples.size();
|
||||
std::cout << "Read " << K << " range triples." << std::endl;
|
||||
|
||||
// parameters
|
||||
size_t minK = 150; // minimum number of range measurements to process initially
|
||||
size_t incK = 25; // minimum number of range measurements to process after
|
||||
bool groundTruth = false;
|
||||
size_t minK =
|
||||
150; // minimum number of range measurements to process initially
|
||||
size_t incK = 25; // minimum number of range measurements to process after
|
||||
bool robust = true;
|
||||
|
||||
// Set Noise parameters
|
||||
Vector priorSigmas = Vector3(1,1,M_PI);
|
||||
Vector priorSigmas = Vector3(1, 1, M_PI);
|
||||
Vector odoSigmas = Vector3(0.05, 0.01, 0.1);
|
||||
double sigmaR = 100; // range standard deviation
|
||||
const NM::Base::shared_ptr // all same type
|
||||
priorNoise = NM::Diagonal::Sigmas(priorSigmas), //prior
|
||||
odoNoise = NM::Diagonal::Sigmas(odoSigmas), // odometry
|
||||
gaussian = NM::Isotropic::Sigma(1, sigmaR), // non-robust
|
||||
tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15), gaussian), //robust
|
||||
rangeNoise = robust ? tukey : gaussian;
|
||||
double sigmaR = 100; // range standard deviation
|
||||
const NM::Base::shared_ptr // all same type
|
||||
priorNoise = NM::Diagonal::Sigmas(priorSigmas), // prior
|
||||
looseNoise = NM::Isotropic::Sigma(2, 1000), // loose LM prior
|
||||
odoNoise = NM::Diagonal::Sigmas(odoSigmas), // odometry
|
||||
gaussian = NM::Isotropic::Sigma(1, sigmaR), // non-robust
|
||||
tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15),
|
||||
gaussian), // robust
|
||||
rangeNoise = robust ? tukey : gaussian;
|
||||
|
||||
// Initialize iSAM
|
||||
ISAM2 isam;
|
||||
gtsam::ISAM2 isam;
|
||||
|
||||
// Add prior on first pose
|
||||
Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120,
|
||||
M_PI - 2.02108900000000);
|
||||
NonlinearFactorGraph newFactors;
|
||||
Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120, M_PI - 2.021089);
|
||||
gtsam::NonlinearFactorGraph newFactors;
|
||||
newFactors.addPrior(0, pose0, priorNoise);
|
||||
Values initial;
|
||||
gtsam::Values initial;
|
||||
initial.insert(0, pose0);
|
||||
|
||||
// initialize points
|
||||
if (groundTruth) { // from TL file
|
||||
initial.insert(symbol('L', 1), Point2(-68.9265, 18.3778));
|
||||
initial.insert(symbol('L', 6), Point2(-37.5805, 69.2278));
|
||||
initial.insert(symbol('L', 0), Point2(-33.6205, 26.9678));
|
||||
initial.insert(symbol('L', 5), Point2(1.7095, -5.8122));
|
||||
} else { // drawn from sigma=1 Gaussian in matlab version
|
||||
initial.insert(symbol('L', 1), Point2(3.5784, 2.76944));
|
||||
initial.insert(symbol('L', 6), Point2(-1.34989, 3.03492));
|
||||
initial.insert(symbol('L', 0), Point2(0.725404, -0.0630549));
|
||||
initial.insert(symbol('L', 5), Point2(0.714743, -0.204966));
|
||||
}
|
||||
// We will initialize landmarks randomly, and keep track of which landmarks we
|
||||
// already added with a set.
|
||||
std::mt19937_64 rng;
|
||||
std::normal_distribution<double> normal(0.0, 100.0);
|
||||
std::set<Symbol> initializedLandmarks;
|
||||
|
||||
// set some loop variables
|
||||
size_t i = 1; // step counter
|
||||
size_t k = 0; // range measurement counter
|
||||
size_t i = 1; // step counter
|
||||
size_t k = 0; // range measurement counter
|
||||
bool initialized = false;
|
||||
Pose2 lastPose = pose0;
|
||||
size_t countK = 0;
|
||||
|
||||
// Loop over odometry
|
||||
gttic_(iSAM);
|
||||
for(const TimedOdometry& timedOdometry: odometry) {
|
||||
//--------------------------------- odometry loop -----------------------------------------
|
||||
for (const TimedOdometry& timedOdometry : odometry) {
|
||||
//--------------------------------- odometry loop --------------------------
|
||||
double t;
|
||||
Pose2 odometry;
|
||||
boost::tie(t, odometry) = timedOdometry;
|
||||
|
||||
// add odometry factor
|
||||
newFactors.push_back(BetweenFactor<Pose2>(i-1, i, odometry, odoNoise));
|
||||
newFactors.emplace_shared<gtsam::BetweenFactor<Pose2>>(i - 1, i, odometry,
|
||||
odoNoise);
|
||||
|
||||
// predict pose and add as initial estimate
|
||||
Pose2 predictedPose = lastPose.compose(odometry);
|
||||
|
@ -166,17 +180,30 @@ int main (int argc, char** argv) {
|
|||
// Check if there are range factors to be added
|
||||
while (k < K && t >= boost::get<0>(triples[k])) {
|
||||
size_t j = boost::get<1>(triples[k]);
|
||||
Symbol landmark_key('L', j);
|
||||
double range = boost::get<2>(triples[k]);
|
||||
newFactors.push_back(RangeFactor<Pose2, Point2>(i, symbol('L', j), range,rangeNoise));
|
||||
newFactors.emplace_shared<gtsam::RangeFactor<Pose2, Point2>>(
|
||||
i, landmark_key, range, rangeNoise);
|
||||
if (initializedLandmarks.count(landmark_key) == 0) {
|
||||
std::cout << "adding landmark " << j << std::endl;
|
||||
double x = normal(rng), y = normal(rng);
|
||||
initial.insert(landmark_key, Point2(x, y));
|
||||
initializedLandmarks.insert(landmark_key);
|
||||
// We also add a very loose prior on the landmark in case there is only
|
||||
// one sighting, which cannot fully determine the landmark.
|
||||
newFactors.emplace_shared<gtsam::PriorFactor<Point2>>(
|
||||
landmark_key, Point2(0, 0), looseNoise);
|
||||
}
|
||||
k = k + 1;
|
||||
countK = countK + 1;
|
||||
}
|
||||
|
||||
// Check whether to update iSAM 2
|
||||
if ((k > minK) && (countK > incK)) {
|
||||
if (!initialized) { // Do a full optimize for first minK ranges
|
||||
if (!initialized) { // Do a full optimize for first minK ranges
|
||||
std::cout << "Initializing at time " << k << std::endl;
|
||||
gttic_(batchInitialization);
|
||||
LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
|
||||
gtsam::LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
|
||||
initial = batchOptimizer.optimize();
|
||||
gttoc_(batchInitialization);
|
||||
initialized = true;
|
||||
|
@ -185,21 +212,27 @@ int main (int argc, char** argv) {
|
|||
isam.update(newFactors, initial);
|
||||
gttoc_(update);
|
||||
gttic_(calculateEstimate);
|
||||
Values result = isam.calculateEstimate();
|
||||
gtsam::Values result = isam.calculateEstimate();
|
||||
gttoc_(calculateEstimate);
|
||||
lastPose = result.at<Pose2>(i);
|
||||
newFactors = NonlinearFactorGraph();
|
||||
initial = Values();
|
||||
newFactors = gtsam::NonlinearFactorGraph();
|
||||
initial = gtsam::Values();
|
||||
countK = 0;
|
||||
}
|
||||
i += 1;
|
||||
//--------------------------------- odometry loop -----------------------------------------
|
||||
} // end for
|
||||
//--------------------------------- odometry loop --------------------------
|
||||
} // end for
|
||||
gttoc_(iSAM);
|
||||
|
||||
// Print timings
|
||||
tictoc_print_();
|
||||
|
||||
// Print optimized landmarks:
|
||||
gtsam::Values finalResult = isam.calculateEstimate();
|
||||
for (auto&& landmark_key : initializedLandmarks) {
|
||||
Point2 p = finalResult.at<Point2>(landmark_key);
|
||||
std::cout << landmark_key << ":" << p.transpose() << "\n";
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
|
|
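The Plaza2 hunks above boil down to one incremental recipe: push odometry and range factors into a small graph, batch-initialize once with Levenberg-Marquardt, then call isam.update() periodically and reset the new-factor containers. A compressed sketch of that loop; every pose, range, threshold, and noise value below is made up for illustration and is not taken from the Plaza2 dataset:

```cpp
// Compressed sketch of the incremental range-SLAM loop above (fake data).
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sam/RangeFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <cmath>

using namespace gtsam;

int main() {
  ISAM2 isam;
  NonlinearFactorGraph newFactors;
  Values initial;

  auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(1, 1, M_PI));
  auto odoNoise = noiseModel::Diagonal::Sigmas(Vector3(0.05, 0.01, 0.1));
  auto rangeNoise = noiseModel::Isotropic::Sigma(1, 100);

  Pose2 lastPose(0, 0, 0);
  newFactors.addPrior(0, lastPose, priorNoise);
  initial.insert(0, lastPose);
  initial.insert(Symbol('L', 0), Point2(1.0, 1.0));  // rough landmark guess

  bool initialized = false;
  for (size_t i = 1; i < 100; i++) {
    // Fake odometry between consecutive poses and a range to landmark L0.
    Pose2 odometry(0.5, 0, 0.01);
    newFactors.emplace_shared<BetweenFactor<Pose2>>(i - 1, i, odometry, odoNoise);
    newFactors.emplace_shared<RangeFactor<Pose2, Point2>>(i, Symbol('L', 0), 7.0,
                                                          rangeNoise);
    Pose2 predicted = lastPose.compose(odometry);
    initial.insert(i, predicted);

    if (!initialized && i == 10) {  // batch-initialize once, as above
      initial = LevenbergMarquardtOptimizer(newFactors, initial).optimize();
      initialized = true;
    }
    if (initialized) {
      isam.update(newFactors, initial);
      lastPose = isam.calculateEstimate().at<Pose2>(i);
      newFactors = NonlinearFactorGraph();  // reset accumulators
      initial = Values();
    } else {
      lastPose = predicted;
    }
  }
  return 0;
}
```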
|
@ -26,9 +26,12 @@
#include <gtsam/nonlinear/ExpressionFactorGraph.h>

// Header order is close to far
#include <gtsam/inference/Symbol.h>
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
#include <gtsam/inference/Symbol.h>

#include <boost/format.hpp>
#include <vector>

using namespace std;

@ -46,10 +49,9 @@ int main(int argc, char* argv[]) {
if (argc > 1) filename = string(argv[1]);

// Load the SfM data from file
SfmData mydata;
readBAL(filename, mydata);
SfmData mydata = SfmData::FromBalFile(filename);
cout << boost::format("read %1% tracks on %2% cameras\n") %
mydata.number_tracks() % mydata.number_cameras();
mydata.numberTracks() % mydata.numberCameras();

// Create a factor graph
ExpressionFactorGraph graph;

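A short sketch of the newer SfmData loading API that this hunk migrates to (illustration only, not from the diff; the dataset name is just GTSAM's bundled example and may differ per install):

#include <gtsam/sfm/SfmData.h>
#include <gtsam/slam/dataset.h>
#include <iostream>
#include <string>

int main() {
  using namespace gtsam;
  // findExampleDataFile resolves a dataset shipped with GTSAM.
  const std::string filename = findExampleDataFile("dubrovnik-3-7-pre");
  SfmData mydata = SfmData::FromBalFile(filename);
  std::cout << "read " << mydata.numberTracks() << " tracks on "
            << mydata.numberCameras() << " cameras\n";
  return 0;
}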
@ -10,17 +10,20 @@
* -------------------------------------------------------------------------- */

/**
* @file SFMExample.cpp
* @file SFMExample_bal.cpp
* @brief Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file
* @author Frank Dellaert
*/

// For an explanation of headers, see SFMExample.cpp
#include <gtsam/inference/Symbol.h>
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
#include <gtsam/inference/Symbol.h>

#include <boost/format.hpp>
#include <vector>

using namespace std;

@ -41,9 +44,8 @@ int main (int argc, char* argv[]) {
if (argc>1) filename = string(argv[1]);

// Load the SfM data from file
SfmData mydata;
readBAL(filename, mydata);
cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.number_tracks() % mydata.number_cameras();
SfmData mydata = SfmData::FromBalFile(filename);
cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.numberTracks() % mydata.numberCameras();

// Create a factor graph
NonlinearFactorGraph graph;

@ -17,15 +17,16 @@
*/

// For an explanation of headers, see SFMExample.cpp
#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !

#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/base/timing.h>

#include <boost/format.hpp>
#include <vector>

using namespace std;

@ -45,10 +46,9 @@ int main(int argc, char* argv[]) {
if (argc > 1) filename = string(argv[1]);

// Load the SfM data from file
SfmData mydata;
readBAL(filename, mydata);
SfmData mydata = SfmData::FromBalFile(filename);
cout << boost::format("read %1% tracks on %2% cameras\n") %
mydata.number_tracks() % mydata.number_cameras();
mydata.numberTracks() % mydata.numberCameras();

// Create a factor graph
NonlinearFactorGraph graph;

@ -131,7 +131,7 @@ int main(int argc, char* argv[]) {

cout << "Time comparison by solving " << filename << " results:" << endl;
cout << boost::format("%1% point tracks and %2% cameras\n") %
mydata.number_tracks() % mydata.number_cameras()
mydata.numberTracks() % mydata.numberCameras()
<< endl;

tictoc_print_();

@ -22,6 +22,8 @@
* Passing function argument allows to specificy an initial position, a pose increment and step count.
*/

#pragma once

// As this is a full 3D problem, we will use Pose3 variables to represent the camera
// positions and Point3 variables (x, y, z) to represent the landmark coordinates.
// Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).

@ -66,4 +68,4 @@ std::vector<gtsam::Pose3> createPoses(
}

return poses;
}
}

@ -69,8 +69,8 @@ namespace br = boost::range;

typedef Pose2 Pose;

typedef NoiseModelFactor1<Pose> NM1;
typedef NoiseModelFactor2<Pose,Pose> NM2;
typedef NoiseModelFactorN<Pose> NM1;
typedef NoiseModelFactorN<Pose,Pose> NM2;
typedef BearingRangeFactor<Pose,Point2> BR;

double chi2_red(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config) {

@ -261,7 +261,7 @@ void runIncremental()
if(BetweenFactor<Pose>::shared_ptr factor =
boost::dynamic_pointer_cast<BetweenFactor<Pose> >(datasetMeasurements[nextMeasurement]))
{
Key key1 = factor->key1(), key2 = factor->key2();
Key key1 = factor->key<1>(), key2 = factor->key<2>();
if(((int)key1 >= firstStep && key1 < key2) || ((int)key2 >= firstStep && key2 < key1)) {
// We found an odometry starting at firstStep
firstPose = std::min(key1, key2);

@ -313,11 +313,11 @@ void runIncremental()
boost::dynamic_pointer_cast<BetweenFactor<Pose> >(measurementf))
{
// Stop collecting measurements that are for future steps
if(factor->key1() > step || factor->key2() > step)
if(factor->key<1>() > step || factor->key<2>() > step)
break;

// Require that one of the nodes is the current one
if(factor->key1() != step && factor->key2() != step)
if(factor->key<1>() != step && factor->key<2>() != step)
throw runtime_error("Problem in data file, out-of-sequence measurements");

// Add a new factor

@ -325,22 +325,22 @@ void runIncremental()
const auto& measured = factor->measured();

// Initialize the new variable
if(factor->key1() > factor->key2()) {
if(!newVariables.exists(factor->key1())) { // Only need to check newVariables since loop closures come after odometry
if(factor->key<1>() > factor->key<2>()) {
if(!newVariables.exists(factor->key<1>())) { // Only need to check newVariables since loop closures come after odometry
if(step == 1)
newVariables.insert(factor->key1(), measured.inverse());
newVariables.insert(factor->key<1>(), measured.inverse());
else {
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key2());
newVariables.insert(factor->key1(), prevPose * measured.inverse());
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key<2>());
newVariables.insert(factor->key<1>(), prevPose * measured.inverse());
}
}
} else {
if(!newVariables.exists(factor->key2())) { // Only need to check newVariables since loop closures come after odometry
if(!newVariables.exists(factor->key<2>())) { // Only need to check newVariables since loop closures come after odometry
if(step == 1)
newVariables.insert(factor->key2(), measured);
newVariables.insert(factor->key<2>(), measured);
else {
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key1());
newVariables.insert(factor->key2(), prevPose * measured);
Pose prevPose = isam2.calculateEstimate<Pose>(factor->key<1>());
newVariables.insert(factor->key<2>(), prevPose * measured);
}
}
}

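A small sketch of the NoiseModelFactorN-style key accessors this file migrates to, i.e. key1()/key2() becoming the 1-indexed key<1>()/key<2>() (standalone illustration, not part of the diff; the noise sigma is assumed):

#include <gtsam/geometry/Pose2.h>
#include <gtsam/slam/BetweenFactor.h>
#include <iostream>

int main() {
  using namespace gtsam;
  auto noise = noiseModel::Isotropic::Sigma(3, 0.1);  // assumed sigma
  BetweenFactor<Pose2> factor(1, 2, Pose2(1.0, 0.0, 0.0), noise);
  // The template index is 1-based, mirroring the old key1()/key2() names.
  std::cout << "keys: " << factor.key<1>() << ", " << factor.key<2>() << "\n";
  return 0;
}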
@ -0,0 +1,159 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file TriangulationLOSTExample.cpp
* @author Akshay Krishnan
* @brief This example runs triangulation several times using 3 different
* approaches: LOST, DLT, and DLT with optimization. It reports the covariance
* and the runtime for each approach.
*
* @date 2022-07-10
*/
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/triangulation.h>

#include <chrono>
#include <iostream>
#include <random>

using namespace std;
using namespace gtsam;

static std::mt19937 rng(42);

void PrintCovarianceStats(const Matrix& mat, const std::string& method) {
Matrix centered = mat.rowwise() - mat.colwise().mean();
Matrix cov = (centered.adjoint() * centered) / double(mat.rows() - 1);
std::cout << method << " covariance: " << std::endl;
std::cout << cov << std::endl;
std::cout << "Trace sqrt: " << sqrt(cov.trace()) << std::endl << std::endl;
}

void PrintDuration(const std::chrono::nanoseconds dur, double num_samples,
const std::string& method) {
double nanoseconds = dur.count() / num_samples;
std::cout << "Time taken by " << method << ": " << nanoseconds * 1e-3
<< std::endl;
}

void GetLargeCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
std::vector<Pose3>* poses, Point3* point,
Point2Vector* measurements) {
const double minXY = -10, maxXY = 10;
const double minZ = -20, maxZ = 0;
const int nrCameras = 500;
cameras->reserve(nrCameras);
poses->reserve(nrCameras);
measurements->reserve(nrCameras);
*point = Point3(0.0, 0.0, 10.0);

std::uniform_real_distribution<double> rand_xy(minXY, maxXY);
std::uniform_real_distribution<double> rand_z(minZ, maxZ);
Cal3_S2 identityK;

for (int i = 0; i < nrCameras; ++i) {
Point3 wti(rand_xy(rng), rand_xy(rng), rand_z(rng));
Pose3 wTi(Rot3(), wti);

poses->push_back(wTi);
cameras->emplace_back(wTi, identityK);
measurements->push_back(cameras->back().project(*point));
}
}

void GetSmallCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
std::vector<Pose3>* poses, Point3* point,
Point2Vector* measurements) {
Pose3 pose1;
Pose3 pose2(Rot3(), Point3(5., 0., -5.));
Cal3_S2 identityK;
PinholeCamera<Cal3_S2> camera1(pose1, identityK);
PinholeCamera<Cal3_S2> camera2(pose2, identityK);

*point = Point3(0, 0, 1);
cameras->push_back(camera1);
cameras->push_back(camera2);
*poses = {pose1, pose2};
*measurements = {camera1.project(*point), camera2.project(*point)};
}

Point2Vector AddNoiseToMeasurements(const Point2Vector& measurements,
const double measurementSigma) {
std::normal_distribution<double> normal(0.0, measurementSigma);

Point2Vector noisyMeasurements;
noisyMeasurements.reserve(measurements.size());
for (const auto& p : measurements) {
noisyMeasurements.emplace_back(p.x() + normal(rng), p.y() + normal(rng));
}
return noisyMeasurements;
}

/* ************************************************************************* */
int main(int argc, char* argv[]) {
CameraSet<PinholeCamera<Cal3_S2>> cameras;
std::vector<Pose3> poses;
Point3 landmark;
Point2Vector measurements;
GetLargeCamerasDataset(&cameras, &poses, &landmark, &measurements);
// GetSmallCamerasDataset(&cameras, &poses, &landmark, &measurements);
const double measurementSigma = 1e-2;
SharedNoiseModel measurementNoise =
noiseModel::Isotropic::Sigma(2, measurementSigma);

const long int nrTrials = 1000;
Matrix errorsDLT = Matrix::Zero(nrTrials, 3);
Matrix errorsLOST = Matrix::Zero(nrTrials, 3);
Matrix errorsDLTOpt = Matrix::Zero(nrTrials, 3);

double rank_tol = 1e-9;
boost::shared_ptr<Cal3_S2> calib = boost::make_shared<Cal3_S2>();
std::chrono::nanoseconds durationDLT;
std::chrono::nanoseconds durationDLTOpt;
std::chrono::nanoseconds durationLOST;

for (int i = 0; i < nrTrials; i++) {
Point2Vector noisyMeasurements =
AddNoiseToMeasurements(measurements, measurementSigma);

auto lostStart = std::chrono::high_resolution_clock::now();
boost::optional<Point3> estimateLOST = triangulatePoint3<Cal3_S2>(
cameras, noisyMeasurements, rank_tol, false, measurementNoise, true);
durationLOST += std::chrono::high_resolution_clock::now() - lostStart;

auto dltStart = std::chrono::high_resolution_clock::now();
boost::optional<Point3> estimateDLT = triangulatePoint3<Cal3_S2>(
cameras, noisyMeasurements, rank_tol, false, measurementNoise, false);
durationDLT += std::chrono::high_resolution_clock::now() - dltStart;

auto dltOptStart = std::chrono::high_resolution_clock::now();
boost::optional<Point3> estimateDLTOpt = triangulatePoint3<Cal3_S2>(
cameras, noisyMeasurements, rank_tol, true, measurementNoise, false);
durationDLTOpt += std::chrono::high_resolution_clock::now() - dltOptStart;

errorsLOST.row(i) = *estimateLOST - landmark;
errorsDLT.row(i) = *estimateDLT - landmark;
errorsDLTOpt.row(i) = *estimateDLTOpt - landmark;
}
PrintCovarianceStats(errorsLOST, "LOST");
PrintCovarianceStats(errorsDLT, "DLT");
PrintCovarianceStats(errorsDLTOpt, "DLT_OPT");

PrintDuration(durationLOST, nrTrials, "LOST");
PrintDuration(durationDLT, nrTrials, "DLT");
PrintDuration(durationDLTOpt, nrTrials, "DLT_OPT");
}

@ -68,10 +68,9 @@ int main(int argc, char** argv) {
<< graph.size() << " factors (Unary+Edge).";

// "Decoding", i.e., configuration with largest value
// We use sequential variable elimination
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");
// Uses max-product.
auto optimalDecoding = graph.optimize();
optimalDecoding.print("\nMost Probable Explanation (optimalDecoding)\n");

// "Inference" Computing marginals for each node
// Here we'll make use of DiscreteMarginals class, which makes use of

@ -50,8 +50,8 @@ int main(int argc, char** argv) {

// Print the UGM distribution
cout << "\nUGM distribution:" << endl;
vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
Cathy & Heather & Mark & Allison);
auto allPosbValues =
DiscreteValues::CartesianProduct(Cathy & Heather & Mark & Allison);
for (size_t i = 0; i < allPosbValues.size(); ++i) {
DiscreteFactor::Values values = allPosbValues[i];
double prodPot = graph(values);

@ -61,10 +61,9 @@ int main(int argc, char** argv) {
}

// "Decoding", i.e., configuration with largest value (MPE)
// We use sequential variable elimination
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
optimalDecoding->print("\noptimalDecoding");
// Uses max-product
auto optimalDecoding = graph.optimize();
GTSAM_PRINT(optimalDecoding);

// "Inference" Computing marginals
cout << "\nComputing Node Marginals .." << endl;

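A compact sketch of the newer discrete API these hunks switch to, where DiscreteFactorGraph::optimize() returns the max-product MPE assignment directly (keys, cardinalities and potentials below are made up for illustration and are not the example's):

#include <gtsam/discrete/DiscreteFactorGraph.h>

int main() {
  using namespace gtsam;
  DiscreteKey Cathy(0, 2), Heather(1, 2);  // (key, cardinality)
  DiscreteFactorGraph graph;
  graph.add(Cathy, "1 3");                // unary potential on Cathy
  graph.add(Cathy & Heather, "1 2 3 4");  // pairwise potential
  DiscreteValues optimalDecoding = graph.optimize();  // max-product MPE
  optimalDecoding.print("Most Probable Explanation:\n");
  return 0;
}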
@ -49,10 +49,7 @@ if(NOT GTSAM_USE_SYSTEM_EIGEN)

endif()

option(GTSAM_BUILD_METIS_EXECUTABLES "Build metis library executables" OFF)
if(GTSAM_SUPPORT_NESTED_DISSECTION)
add_subdirectory(metis)
endif()
# metis: already handled in ROOT/cmake/HandleMetis.cmake

add_subdirectory(ceres)

@ -440,7 +440,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
EIGEN_DEVICE_FUNC
void lazyAssign(const TriangularBase<OtherDerived>& other);

/** \deprecated */
/** @deprecated */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void lazyAssign(const MatrixBase<OtherDerived>& other);

@ -523,7 +523,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
}

/** \deprecated
/** @deprecated
* Shortcut for \code (*this).swap(other.triangularView<(*this)::Mode>()) \endcode */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC

@ -5,16 +5,18 @@ project(gtsam LANGUAGES CXX)
# The following variable is the master list of subdirs to add
set (gtsam_subdirs
base
basis
geometry
inference
symbolic
discrete
hybrid
linear
nonlinear
sam
sfm
slam
navigation
)

set(gtsam_srcs)

@ -88,7 +90,8 @@ list(APPEND gtsam_srcs "${PROJECT_BINARY_DIR}/config.h" "${PROJECT_BINARY_DIR}/d
install(FILES "${PROJECT_BINARY_DIR}/config.h" "${PROJECT_BINARY_DIR}/dllexport.h" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam)

if(GTSAM_SUPPORT_NESTED_DISSECTION)
list(APPEND GTSAM_ADDITIONAL_LIBRARIES metis-gtsam)
# target metis-gtsam-if is defined in both cases: embedded metis or system version:
list(APPEND GTSAM_ADDITIONAL_LIBRARIES metis-gtsam-if)
endif()

# Versions

@ -114,12 +117,10 @@ set_target_properties(gtsam PROPERTIES
VERSION ${gtsam_version}
SOVERSION ${gtsam_soversion})

# Append Eigen include path, set in top-level CMakeLists.txt to either
# Append Eigen include path to either
# system-eigen, or GTSAM eigen path
target_include_directories(gtsam PUBLIC
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
)
target_link_libraries(gtsam PUBLIC Eigen3::Eigen)

# MKL include dir:
if (GTSAM_USE_EIGEN_MKL)
target_include_directories(gtsam PUBLIC ${MKL_INCLUDE_DIR})

@ -154,16 +155,6 @@ target_include_directories(gtsam SYSTEM BEFORE PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>
)
if(GTSAM_SUPPORT_NESTED_DISSECTION)
target_include_directories(gtsam BEFORE PUBLIC
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
)
endif()

if(WIN32) # Add 'lib' prefix to static library to avoid filename collision with shared library
if (NOT BUILD_SHARED_LIBS)

@ -5,8 +5,5 @@ install(FILES ${base_headers} DESTINATION include/gtsam/base)
file(GLOB base_headers_tree "treeTraversal/*.h")
install(FILES ${base_headers_tree} DESTINATION include/gtsam/base/treeTraversal)

file(GLOB deprecated_headers "deprecated/*.h")
install(FILES ${deprecated_headers} DESTINATION include/gtsam/base/deprecated)

# Build tests
add_subdirectory(tests)

@ -62,7 +62,7 @@ namespace gtsam {
* convenience to avoid having lengthy types in the code. Through timing,
* we've seen that the fast_pool_allocator can lead to speedups of several
* percent.
* @addtogroup base
* @ingroup base
*/
template<typename KEY, typename VALUE>
class ConcurrentMap : public ConcurrentMapBase<KEY,VALUE> {

@ -113,6 +113,7 @@ private:
template<class Archive>
void load(Archive& ar, const unsigned int /*version*/)
{
this->clear();
// Load into STL container and then fill our map
FastVector<std::pair<KEY, VALUE> > map;
ar & BOOST_SERIALIZATION_NVP(map);

@ -28,7 +28,7 @@ namespace gtsam {
/**
* Disjoint set forest using an STL map data structure underneath
* Uses rank compression and union by rank, iterator version
* @addtogroup base
* @ingroup base
*/
template <class KEY>
class DSFMap {

@ -33,7 +33,7 @@ namespace gtsam {
* A fast implementation of disjoint set forests that uses vector as underly data structure.
* This is the absolute minimal DSF data structure, and only allows size_t keys
* Uses rank compression but not union by rank :-(
* @addtogroup base
* @ingroup base
*/
class GTSAM_EXPORT DSFBase {

@ -59,7 +59,7 @@ public:

/**
* DSFVector additionally keeps a vector of keys to support more expensive operations
* @addtogroup base
* @ingroup base
*/
class GTSAM_EXPORT DSFVector: public DSFBase {

@ -34,7 +34,7 @@ namespace gtsam {
* convenience to avoid having lengthy types in the code. Through timing,
* we've seen that the fast_pool_allocator can lead to speedups of several
* percent.
* @addtogroup base
* @ingroup base
*/
template<typename VALUE>
class FastList: public std::list<VALUE, typename internal::FastDefaultAllocator<VALUE>::type> {

@ -31,7 +31,7 @@ namespace gtsam {
* convenience to avoid having lengthy types in the code. Through timing,
* we've seen that the fast_pool_allocator can lead to speedups of several
* percent.
* @addtogroup base
* @ingroup base
*/
template<typename KEY, typename VALUE>
class FastMap : public std::map<KEY, VALUE, std::less<KEY>,

@ -18,6 +18,10 @@

#pragma once

#include <boost/version.hpp>
#if BOOST_VERSION >= 107400
#include <boost/serialization/library_version_type.hpp>
#endif
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/set.hpp>
#include <gtsam/base/FastDefaultAllocator.h>

@ -39,7 +43,7 @@ namespace gtsam {
* fast_pool_allocator instead of the default STL allocator. This is just a
* convenience to avoid having lengthy types in the code. Through timing,
* we've seen that the fast_pool_allocator can lead to speedups of several %.
* @addtogroup base
* @ingroup base
*/
template<typename VALUE>
class FastSet: public std::set<VALUE, std::less<VALUE>,

@ -27,7 +27,7 @@ namespace gtsam {
/**
* FastVector is a type alias to a std::vector with a custom memory allocator.
* The particular allocator depends on GTSAM's cmake configuration.
* @addtogroup base
* @ingroup base
*/
template <typename T>
using FastVector =

@ -95,7 +95,7 @@ template<class Class>
struct MultiplicativeGroupTraits {
typedef group_tag structure_category;
typedef multiplicative_group_tag group_flavor;
static Class Identity() { return Class::identity(); }
static Class Identity() { return Class::Identity(); }
static Class Compose(const Class &g, const Class & h) { return g * h;}
static Class Between(const Class &g, const Class & h) { return g.inverse() * h;}
static Class Inverse(const Class &g) { return g.inverse();}

@ -111,7 +111,7 @@ template<class Class>
struct AdditiveGroupTraits {
typedef group_tag structure_category;
typedef additive_group_tag group_flavor;
static Class Identity() { return Class::identity(); }
static Class Identity() { return Class::Identity(); }
static Class Compose(const Class &g, const Class & h) { return g + h;}
static Class Between(const Class &g, const Class & h) { return h - g;}
static Class Inverse(const Class &g) { return -g;}

@ -147,7 +147,7 @@ public:
DirectProduct(const G& g, const H& h):std::pair<G,H>(g,h) {}

// identity
static DirectProduct identity() { return DirectProduct(); }
static DirectProduct Identity() { return DirectProduct(); }

DirectProduct operator*(const DirectProduct& other) const {
return DirectProduct(traits<G>::Compose(this->first, other.first),

@ -181,7 +181,7 @@ public:
DirectSum(const G& g, const H& h):std::pair<G,H>(g,h) {}

// identity
static DirectSum identity() { return DirectSum(); }
static DirectSum Identity() { return DirectSum(); }

DirectSum operator+(const DirectSum& other) const {
return DirectSum(g()+other.g(), h()+other.h());

@ -17,6 +17,7 @@
* @author Frank Dellaert
* @author Mike Bosse
* @author Duy Nguyen Ta
* @author Yotam Stern
*/


@ -176,7 +177,7 @@ struct LieGroupTraits: GetDimensionImpl<Class, Class::dimension> {
/// @name Group
/// @{
typedef multiplicative_group_tag group_flavor;
static Class Identity() { return Class::identity();}
static Class Identity() { return Class::Identity();}
/// @}

/// @name Manifold

@ -319,12 +320,28 @@ T expm(const Vector& x, int K=7) {
}

/**
* Linear interpolation between X and Y by coefficient t in [0, 1].
* Linear interpolation between X and Y by coefficient t. Typically t \in [0,1],
* but can also be used to extrapolate before pose X or after pose Y.
*/
template <typename T>
T interpolate(const T& X, const T& Y, double t) {
assert(t >= 0 && t <= 1);
return traits<T>::Compose(X, traits<T>::Expmap(t * traits<T>::Logmap(traits<T>::Between(X, Y))));
T interpolate(const T& X, const T& Y, double t,
typename MakeOptionalJacobian<T, T>::type Hx = boost::none,
typename MakeOptionalJacobian<T, T>::type Hy = boost::none) {
if (Hx || Hy) {
typename MakeJacobian<T, T>::type between_H_x, log_H, exp_H, compose_H_x;
const T between =
traits<T>::Between(X, Y, between_H_x); // between_H_y = identity
typename traits<T>::TangentVector delta = traits<T>::Logmap(between, log_H);
const T Delta = traits<T>::Expmap(t * delta, exp_H);
const T result = traits<T>::Compose(
X, Delta, compose_H_x); // compose_H_xinv_y = identity

if (Hx) *Hx = compose_H_x + t * exp_H * log_H * between_H_x;
if (Hy) *Hy = t * exp_H * log_H;
return result;
}
return traits<T>::Compose(
X, traits<T>::Expmap(t * traits<T>::Logmap(traits<T>::Between(X, Y))));
}

/**

@ -353,4 +370,4 @@ public:
* the gtsam namespace to be more easily enforced as testable
*/
#define GTSAM_CONCEPT_LIE_INST(T) template class gtsam::IsLieGroup<T>;
#define GTSAM_CONCEPT_LIE_TYPE(T) typedef gtsam::IsLieGroup<T> _gtsam_IsLieGroup_##T;
#define GTSAM_CONCEPT_LIE_TYPE(T) using _gtsam_IsLieGroup_##T = gtsam::IsLieGroup<T>;

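A brief sketch of calling the extended interpolate<T>() above with its optional Jacobians (standalone illustration, not from the diff; the poses and t are arbitrary):

#include <gtsam/base/Lie.h>
#include <gtsam/geometry/Pose3.h>
#include <iostream>

int main() {
  using namespace gtsam;
  Pose3 X;  // identity
  Pose3 Y(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1.0, 2.0, 3.0));
  Matrix6 Hx, Hy;  // d(interpolated)/dX and d(interpolated)/dY
  Pose3 mid = interpolate(X, Y, 0.5, Hx, Hy);
  std::cout << "interpolated translation: " << mid.translation().transpose() << "\n";
  std::cout << "Hx:\n" << Hx << "\nHy:\n" << Hy << "\n";
  return 0;
}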
@ -1,26 +0,0 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file LieMatrix.h
* @brief External deprecation warning, see deprecated/LieMatrix.h for details
* @author Paul Drews
*/

#pragma once

#ifdef _MSC_VER
#pragma message("LieMatrix.h is deprecated. Please use Eigen::Matrix instead.")
#else
#warning "LieMatrix.h is deprecated. Please use Eigen::Matrix instead."
#endif

#include "gtsam/base/deprecated/LieMatrix.h"

@ -1,26 +0,0 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file LieVector.h
* @brief Deprecation warning for LieVector, see deprecated/LieVector.h for details.
* @author Paul Drews
*/

#pragma once

#ifdef _MSC_VER
#pragma message("LieVector.h is deprecated. Please use Eigen::Vector instead.")
#else
#warning "LieVector.h is deprecated. Please use Eigen::Vector instead."
#endif

#include <gtsam/base/deprecated/LieVector.h>

@ -178,4 +178,4 @@ struct FixedDimension {
// * the gtsam namespace to be more easily enforced as testable
// */
#define GTSAM_CONCEPT_MANIFOLD_INST(T) template class gtsam::IsManifold<T>;
#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) typedef gtsam::IsManifold<T> _gtsam_IsManifold_##T;
#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) using _gtsam_IsManifold_##T = gtsam::IsManifold<T>;

@ -25,6 +25,7 @@

#include <boost/tuple/tuple.hpp>
#include <boost/tokenizer.hpp>
#include <boost/format.hpp>

#include <cstdarg>
#include <cstring>

@ -26,12 +26,9 @@

#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/base/Vector.h>
#include <gtsam/config.h>

#include <boost/format.hpp>
#include <boost/function.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/math/special_functions/fpclassify.hpp>

#include <vector>

/**
* Matrix is a typedef in the gtsam namespace

@ -46,28 +43,28 @@ typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> M
// Create handy typedefs and constants for square-size matrices
// MatrixMN, MatrixN = MatrixNN, I_NxN, and Z_NxN, for M,N=1..9
#define GTSAM_MAKE_MATRIX_DEFS(N) \
typedef Eigen::Matrix<double, N, N> Matrix##N; \
typedef Eigen::Matrix<double, 1, N> Matrix1##N; \
typedef Eigen::Matrix<double, 2, N> Matrix2##N; \
typedef Eigen::Matrix<double, 3, N> Matrix3##N; \
typedef Eigen::Matrix<double, 4, N> Matrix4##N; \
typedef Eigen::Matrix<double, 5, N> Matrix5##N; \
typedef Eigen::Matrix<double, 6, N> Matrix6##N; \
typedef Eigen::Matrix<double, 7, N> Matrix7##N; \
typedef Eigen::Matrix<double, 8, N> Matrix8##N; \
typedef Eigen::Matrix<double, 9, N> Matrix9##N; \
using Matrix##N = Eigen::Matrix<double, N, N>; \
using Matrix1##N = Eigen::Matrix<double, 1, N>; \
using Matrix2##N = Eigen::Matrix<double, 2, N>; \
using Matrix3##N = Eigen::Matrix<double, 3, N>; \
using Matrix4##N = Eigen::Matrix<double, 4, N>; \
using Matrix5##N = Eigen::Matrix<double, 5, N>; \
using Matrix6##N = Eigen::Matrix<double, 6, N>; \
using Matrix7##N = Eigen::Matrix<double, 7, N>; \
using Matrix8##N = Eigen::Matrix<double, 8, N>; \
using Matrix9##N = Eigen::Matrix<double, 9, N>; \
static const Eigen::MatrixBase<Matrix##N>::IdentityReturnType I_##N##x##N = Matrix##N::Identity(); \
static const Eigen::MatrixBase<Matrix##N>::ConstantReturnType Z_##N##x##N = Matrix##N::Zero();

GTSAM_MAKE_MATRIX_DEFS(1);
GTSAM_MAKE_MATRIX_DEFS(2);
GTSAM_MAKE_MATRIX_DEFS(3);
GTSAM_MAKE_MATRIX_DEFS(4);
GTSAM_MAKE_MATRIX_DEFS(5);
GTSAM_MAKE_MATRIX_DEFS(6);
GTSAM_MAKE_MATRIX_DEFS(7);
GTSAM_MAKE_MATRIX_DEFS(8);
GTSAM_MAKE_MATRIX_DEFS(9);
GTSAM_MAKE_MATRIX_DEFS(1)
GTSAM_MAKE_MATRIX_DEFS(2)
GTSAM_MAKE_MATRIX_DEFS(3)
GTSAM_MAKE_MATRIX_DEFS(4)
GTSAM_MAKE_MATRIX_DEFS(5)
GTSAM_MAKE_MATRIX_DEFS(6)
GTSAM_MAKE_MATRIX_DEFS(7)
GTSAM_MAKE_MATRIX_DEFS(8)
GTSAM_MAKE_MATRIX_DEFS(9)

// Matrix expressions for accessing parts of matrices
typedef Eigen::Block<Matrix> SubMatrix;

@ -489,7 +486,7 @@ struct MultiplyWithInverseFunction {

// The function phi should calculate f(a)*b, with derivatives in a and b.
// Naturally, the derivative in b is f(a).
typedef boost::function<VectorN(
typedef std::function<VectorN(
const T&, const VectorN&, OptionalJacobian<N, M>, OptionalJacobian<N, N>)>
Operator;

@ -523,82 +520,4 @@ GTSAM_EXPORT Matrix LLt(const Matrix& A);
GTSAM_EXPORT Matrix RtR(const Matrix& A);

GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
} // namespace gtsam

#include <boost/serialization/nvp.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/split_free.hpp>

namespace boost {
namespace serialization {

/**
* Ref. https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
*
* Eigen supports calling resize() on both static and dynamic matrices.
* This allows for a uniform API, with resize having no effect if the static matrix
* is already the correct size.
* https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
*
* We use all the Matrix template parameters to ensure wide compatibility.
*
* eigen_typekit in ROS uses the same code
* http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
*/

// split version - sends sizes ahead
template<class Archive,
typename Scalar_,
int Rows_,
int Cols_,
int Ops_,
int MaxRows_,
int MaxCols_>
void save(Archive & ar,
const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
const unsigned int /*version*/) {
const size_t rows = m.rows(), cols = m.cols();
ar << BOOST_SERIALIZATION_NVP(rows);
ar << BOOST_SERIALIZATION_NVP(cols);
ar << make_nvp("data", make_array(m.data(), m.size()));
}

template<class Archive,
typename Scalar_,
int Rows_,
int Cols_,
int Ops_,
int MaxRows_,
int MaxCols_>
void load(Archive & ar,
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
const unsigned int /*version*/) {
size_t rows, cols;
ar >> BOOST_SERIALIZATION_NVP(rows);
ar >> BOOST_SERIALIZATION_NVP(cols);
m.resize(rows, cols);
ar >> make_nvp("data", make_array(m.data(), m.size()));
}

// templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
template<class Archive,
typename Scalar_,
int Rows_,
int Cols_,
int Ops_,
int MaxRows_,
int MaxCols_>
void serialize(Archive & ar,
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
const unsigned int version) {
split_free(ar, m, version);
}

// specialized to Matrix for MATLAB wrapper
template <class Archive>
void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
split_free(ar, m, version);
}

} // namespace serialization
} // namespace boost
} // namespace gtsam

@ -0,0 +1,89 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file MatrixSerialization.h
* @brief Serialization for matrices
* @author Frank Dellaert
* @date February 2022
*/

// \callgraph

#pragma once

#include <gtsam/base/Matrix.h>

#include <boost/serialization/array.hpp>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/split_free.hpp>

namespace boost {
namespace serialization {

/**
* Ref.
* https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
*
* Eigen supports calling resize() on both static and dynamic matrices.
* This allows for a uniform API, with resize having no effect if the static
* matrix is already the correct size.
* https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
*
* We use all the Matrix template parameters to ensure wide compatibility.
*
* eigen_typekit in ROS uses the same code
* http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
*/

// split version - sends sizes ahead
template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
int MaxRows_, int MaxCols_>
void save(
Archive& ar,
const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
const unsigned int /*version*/) {
const size_t rows = m.rows(), cols = m.cols();
ar << BOOST_SERIALIZATION_NVP(rows);
ar << BOOST_SERIALIZATION_NVP(cols);
ar << make_nvp("data", make_array(m.data(), m.size()));
}

template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
int MaxRows_, int MaxCols_>
void load(Archive& ar,
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
const unsigned int /*version*/) {
size_t rows, cols;
ar >> BOOST_SERIALIZATION_NVP(rows);
ar >> BOOST_SERIALIZATION_NVP(cols);
m.resize(rows, cols);
ar >> make_nvp("data", make_array(m.data(), m.size()));
}

// templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
int MaxRows_, int MaxCols_>
void serialize(
Archive& ar,
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
const unsigned int version) {
split_free(ar, m, version);
}

// specialized to Matrix for MATLAB wrapper
template <class Archive>
void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
split_free(ar, m, version);
}

} // namespace serialization
} // namespace boost
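A small round-trip sketch using the serialization declared in the new header above (illustration only, not from the diff; the text archive and matrix sizes are arbitrary choices):

#include <gtsam/base/MatrixSerialization.h>

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>

#include <iostream>
#include <sstream>

int main() {
  const gtsam::Matrix A = gtsam::Matrix::Random(3, 4);
  gtsam::Matrix B;

  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << boost::serialization::make_nvp("A", A);  // uses save() above
  }
  {
    boost::archive::text_iarchive ia(ss);
    ia >> boost::serialization::make_nvp("A", B);  // uses load(), resizes B
  }
  std::cout << "round-trip error: " << (A - B).norm() << std::endl;
  return 0;
}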
Some files were not shown because too many files have changed in this diff.