Merge branch 'develop' into fix/foot-imu
commit 6543fb6c7c

@@ -0,0 +1,87 @@ (new file)
| #!/bin/bash | ||||
| 
 | ||||
| ########################################################## | ||||
| # Build and test the GTSAM Python wrapper. | ||||
| ########################################################## | ||||
| 
 | ||||
| set -x -e | ||||
| 
 | ||||
| # install TBB with _debug.so files | ||||
| function install_tbb() | ||||
| { | ||||
|   TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download | ||||
|   TBB_VERSION=4.4.5 | ||||
|   TBB_DIR=tbb44_20160526oss | ||||
|   TBB_SAVEPATH="/tmp/tbb.tgz" | ||||
| 
 | ||||
|   if [ "$(uname)" == "Linux" ]; then | ||||
|     OS_SHORT="lin" | ||||
|     TBB_LIB_DIR="intel64/gcc4.4" | ||||
|     SUDO="sudo" | ||||
| 
 | ||||
|   elif [ "$(uname)" == "Darwin" ]; then | ||||
|     OS_SHORT="osx" | ||||
|     TBB_LIB_DIR="" | ||||
|     SUDO="" | ||||
| 
 | ||||
|   fi | ||||
| 
 | ||||
|   wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH | ||||
|   tar -C /tmp -xf $TBB_SAVEPATH | ||||
| 
 | ||||
|   TBBROOT=/tmp/$TBB_DIR | ||||
|   # Copy the needed files to the correct places. | ||||
|   # For CI builds this works correctly without having to set path variables. | ||||
|   # This is what Homebrew does to install TBB on macOS. | ||||
|   $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ | ||||
|   $SUDO cp -R $TBBROOT/include/ /usr/local/include/ | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| if [ -z ${PYTHON_VERSION+x} ]; then | ||||
|     echo "Please provide the Python version to build against!" | ||||
|     exit 127 | ||||
| fi | ||||
| 
 | ||||
| PYTHON="python${PYTHON_VERSION}" | ||||
| 
 | ||||
| if [[ $(uname) == "Darwin" ]]; then | ||||
|     brew install wget | ||||
| else | ||||
|     # Install a system package required by our library | ||||
|     sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools | ||||
| fi | ||||
| 
 | ||||
| PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin | ||||
| 
 | ||||
| [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb | ||||
| 
 | ||||
| 
 | ||||
| BUILD_PYBIND="ON" | ||||
| TYPEDEF_POINTS_TO_VECTORS="ON" | ||||
| 
 | ||||
| sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt | ||||
| 
 | ||||
| mkdir $GITHUB_WORKSPACE/build | ||||
| cd $GITHUB_WORKSPACE/build | ||||
| 
 | ||||
| cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \ | ||||
|     -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \ | ||||
|     -DGTSAM_USE_QUATERNIONS=OFF \ | ||||
|     -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ | ||||
|     -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ | ||||
|     -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ | ||||
|     -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \ | ||||
|     -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \ | ||||
|     -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ | ||||
|     -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \ | ||||
|     -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \ | ||||
|     -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install | ||||
| 
 | ||||
| make -j$(nproc) install | ||||
| 
 | ||||
| 
 | ||||
| cd $GITHUB_WORKSPACE/build/python | ||||
| $PYTHON setup.py install --user --prefix= | ||||
| cd $GITHUB_WORKSPACE/python/gtsam/tests | ||||
| $PYTHON -m unittest discover | ||||
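The wrapper build script above is driven entirely by environment variables rather than command-line arguments; besides PYTHON_VERSION and GITHUB_WORKSPACE, the workflows also export CC/CXX (and optionally GTSAM_WITH_TBB) before calling it. As a rough sketch only, assuming the script is saved as .github/scripts/python.sh (the path the Python CI workflow later invokes) and the GTSAM checkout is the current directory, a local run would look roughly like:

    # Hypothetical local invocation; in CI these variables come from the workflow's env block.
    export GITHUB_WORKSPACE=$PWD      # root of the GTSAM checkout
    export PYTHON_VERSION=3           # required: the script exits with code 127 when unset
    export GTSAM_WITH_TBB=OFF         # set to ON to exercise the install_tbb path
    bash .github/scripts/python.sh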
@@ -0,0 +1,122 @@ (new file)
| #!/bin/bash | ||||
| 
 | ||||
| ########################################################## | ||||
| # Build and test GTSAM for *nix based systems. | ||||
| # Specifically Linux and macOS. | ||||
| ########################################################## | ||||
| 
 | ||||
| # install TBB with _debug.so files | ||||
| function install_tbb() | ||||
| { | ||||
|   TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download | ||||
|   TBB_VERSION=4.4.5 | ||||
|   TBB_DIR=tbb44_20160526oss | ||||
|   TBB_SAVEPATH="/tmp/tbb.tgz" | ||||
| 
 | ||||
|   if [ "$(uname)" == "Linux" ]; then | ||||
|     OS_SHORT="lin" | ||||
|     TBB_LIB_DIR="intel64/gcc4.4" | ||||
|     SUDO="sudo" | ||||
| 
 | ||||
|   elif [ "$(uname)" == "Darwin" ]; then | ||||
|     OS_SHORT="osx" | ||||
|     TBB_LIB_DIR="" | ||||
|     SUDO="" | ||||
| 
 | ||||
|   fi | ||||
| 
 | ||||
|   wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH | ||||
|   tar -C /tmp -xf $TBB_SAVEPATH | ||||
| 
 | ||||
|   TBBROOT=/tmp/$TBB_DIR | ||||
|   # Copy the needed files to the correct places. | ||||
|   # For CI builds this works correctly without having to set path variables. | ||||
|   # This is what Homebrew does to install TBB on macOS. | ||||
|   $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ | ||||
|   $SUDO cp -R $TBBROOT/include/ /usr/local/include/ | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| # common tasks before either build or test | ||||
| function configure() | ||||
| { | ||||
|   set -e   # Make sure any error makes the script return an error code | ||||
|   set -x   # echo | ||||
| 
 | ||||
|   SOURCE_DIR=$GITHUB_WORKSPACE | ||||
|   BUILD_DIR=$GITHUB_WORKSPACE/build | ||||
| 
 | ||||
|   #env | ||||
|   rm -fr $BUILD_DIR || true | ||||
|   mkdir $BUILD_DIR && cd $BUILD_DIR | ||||
| 
 | ||||
|   [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb | ||||
| 
 | ||||
|   if [ ! -z "$GCC_VERSION" ]; then | ||||
|     export CC=gcc-$GCC_VERSION | ||||
|     export CXX=g++-$GCC_VERSION | ||||
|   fi | ||||
| 
 | ||||
|   # GTSAM_BUILD_WITH_MARCH_NATIVE=OFF: to avoid crashes in builder VMs | ||||
|   cmake $SOURCE_DIR \ | ||||
|       -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Debug} \ | ||||
|       -DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \ | ||||
|       -DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \ | ||||
|       -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ | ||||
|       -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \ | ||||
|       -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \ | ||||
|       -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ | ||||
|       -DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \ | ||||
|       -DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \ | ||||
|       -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ | ||||
|       -DBOOST_ROOT=$BOOST_ROOT \ | ||||
|       -DBoost_NO_SYSTEM_PATHS=ON \ | ||||
|       -DBoost_ARCHITECTURE=-x64 | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| # common tasks after either build or test | ||||
| function finish () | ||||
| { | ||||
|   # Print ccache stats | ||||
|   [ -x "$(command -v ccache)" ] && ccache -s | ||||
| 
 | ||||
|   cd $SOURCE_DIR | ||||
| } | ||||
| 
 | ||||
| # compile the code with the intent of populating the cache | ||||
| function build () | ||||
| { | ||||
|   export GTSAM_BUILD_EXAMPLES_ALWAYS=ON | ||||
|   export GTSAM_BUILD_TESTS=OFF | ||||
| 
 | ||||
|   configure | ||||
| 
 | ||||
|   make -j2 | ||||
| 
 | ||||
|   finish | ||||
| } | ||||
| 
 | ||||
| # run the tests | ||||
| function test () | ||||
| { | ||||
|   export GTSAM_BUILD_EXAMPLES_ALWAYS=OFF | ||||
|   export GTSAM_BUILD_TESTS=ON | ||||
| 
 | ||||
|   configure | ||||
| 
 | ||||
|   # Actual build: | ||||
|   make -j2 check | ||||
| 
 | ||||
|   finish | ||||
| } | ||||
| 
 | ||||
| # select between build or test | ||||
| case $1 in | ||||
|   -b) | ||||
|     build | ||||
|     ;; | ||||
|   -t) | ||||
|     test | ||||
|     ;; | ||||
| esac | ||||
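This script selects between a cache-priming compile (-b) and a full test run (-t); everything else is controlled through environment variables, with BOOST_ROOT, CC and CXX exported by the calling workflows. A minimal sketch of how the workflows below drive it, assuming it lives at .github/scripts/unix.sh (the path they reference), could be:

    # Hypothetical local equivalents of the CI invocations.
    export GITHUB_WORKSPACE=$PWD            # configure() builds under $GITHUB_WORKSPACE/build
    export CMAKE_BUILD_TYPE=Release         # defaults to Debug when unset
    export GTSAM_WITH_TBB=ON                # ON makes configure() run install_tbb first
    bash .github/scripts/unix.sh -b         # compile only, populating the ccache
    bash .github/scripts/unix.sh -t         # reconfigure with tests enabled and run "make check"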
@@ -0,0 +1,91 @@ (new file)
| name: Linux CI | ||||
| 
 | ||||
| on: [push, pull_request] | ||||
| 
 | ||||
| jobs: | ||||
|   build: | ||||
|     name: ${{ matrix.name }} ${{ matrix.build_type }} | ||||
|     runs-on: ${{ matrix.os }} | ||||
| 
 | ||||
|     env: | ||||
|       CTEST_OUTPUT_ON_FAILURE: ON | ||||
|       CTEST_PARALLEL_LEVEL: 2 | ||||
|       CMAKE_BUILD_TYPE: ${{ matrix.build_type }} | ||||
|       GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} | ||||
| 
 | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         # Github Actions requires a single row to be added to the build matrix. | ||||
|         # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. | ||||
|         name: [ | ||||
|           ubuntu-18.04-gcc-5, | ||||
|           ubuntu-18.04-gcc-9, | ||||
|           ubuntu-18.04-clang-9, | ||||
|         ] | ||||
| 
 | ||||
|         build_type: [Debug, Release] | ||||
|         build_unstable: [ON] | ||||
|         include: | ||||
|           - name: ubuntu-18.04-gcc-5 | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "5" | ||||
| 
 | ||||
|           - name: ubuntu-18.04-gcc-9 | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "9" | ||||
| 
 | ||||
|           - name: ubuntu-18.04-clang-9 | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: clang | ||||
|             version: "9" | ||||
| 
 | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@master | ||||
|       - name: Install (Linux) | ||||
|         if: runner.os == 'Linux' | ||||
|         run: | | ||||
|           # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository. | ||||
|           if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then | ||||
|             # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool | ||||
|             # ipv4 avoids potential timeouts because of crappy IPv6 infrastructure | ||||
|             # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository | ||||
|             # This key is not in the keystore by default for Ubuntu so we need to add it. | ||||
|             LLVM_KEY=15CF4D18AF4F7421 | ||||
|             gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY | ||||
|             gpg -a --export $LLVM_KEY | sudo apt-key add - | ||||
|             sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" | ||||
|           fi | ||||
|           sudo apt-get -y update | ||||
| 
 | ||||
|           sudo apt install cmake build-essential pkg-config libpython-dev python-numpy | ||||
|            | ||||
|           echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV | ||||
|           echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV | ||||
|            | ||||
|           if [ "${{ matrix.compiler }}" = "gcc" ]; then | ||||
|             sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib | ||||
|             echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           else | ||||
|             sudo apt-get install -y clang-${{ matrix.version }} g++-multilib | ||||
|             echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           fi | ||||
|       - name: Check Boost version | ||||
|         if: runner.os == 'Linux' | ||||
|         run: | | ||||
|           echo "BOOST_ROOT = $BOOST_ROOT" | ||||
|       - name: Build and Test (Linux) | ||||
|         if: runner.os == 'Linux' | ||||
|         run: | | ||||
|           bash .github/scripts/unix.sh -t | ||||
|       - name: Upload build directory | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         if: matrix.build_type  == 'Release' | ||||
|         with: | ||||
|           name: gtsam-${{ matrix.name }}-${{ matrix.build_type }} | ||||
|           path: ${{ github.workspace }}/build/ | ||||
@@ -0,0 +1,59 @@ (new file)
| name: macOS CI | ||||
| 
 | ||||
| on: [pull_request] | ||||
| 
 | ||||
| jobs: | ||||
|   build: | ||||
|     name: ${{ matrix.name }} ${{ matrix.build_type }} | ||||
|     runs-on: ${{ matrix.os }} | ||||
| 
 | ||||
|     env: | ||||
|       CTEST_OUTPUT_ON_FAILURE: ON | ||||
|       CTEST_PARALLEL_LEVEL: 2 | ||||
|       CMAKE_BUILD_TYPE: ${{ matrix.build_type }} | ||||
|       GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         # Github Actions requires a single row to be added to the build matrix. | ||||
|         # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. | ||||
|         name: [ | ||||
|           macOS-10.15-xcode-11.3.1, | ||||
|         ] | ||||
| 
 | ||||
|         build_type: [Debug, Release] | ||||
|         build_unstable: [ON] | ||||
|         include: | ||||
|           - name: macOS-10.15-xcode-11.3.1 | ||||
|             os: macOS-10.15 | ||||
|             compiler: xcode | ||||
|             version: "11.3.1" | ||||
| 
 | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@master | ||||
|       - name: Install (macOS) | ||||
|         if: runner.os == 'macOS' | ||||
|         run: | | ||||
|           brew tap ProfFan/robotics | ||||
|           brew install cmake ninja | ||||
|           brew install ProfFan/robotics/boost | ||||
|           if [ "${{ matrix.compiler }}" = "gcc" ]; then | ||||
|             brew install gcc@${{ matrix.version }} | ||||
|             echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           else | ||||
|             sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app | ||||
|             echo "CC=clang" >> $GITHUB_ENV | ||||
|             echo "CXX=clang++" >> $GITHUB_ENV | ||||
|           fi | ||||
|       - name: Build and Test (macOS) | ||||
|         if: runner.os == 'macOS' | ||||
|         run: | | ||||
|           bash .github/scripts/unix.sh -t | ||||
|       - name: Upload build directory | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         if: matrix.build_type == 'Release' | ||||
|         with: | ||||
|           name: gtsam-${{ matrix.name }}-${{ matrix.build_type }} | ||||
|           path: ${{ github.workspace }}/build/ | ||||
@@ -0,0 +1,113 @@ (new file)
| name: Python CI | ||||
| 
 | ||||
| on: [pull_request] | ||||
| 
 | ||||
| jobs: | ||||
|   build: | ||||
|     name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }} | ||||
|     runs-on: ${{ matrix.os }} | ||||
| 
 | ||||
|     env: | ||||
|       CTEST_OUTPUT_ON_FAILURE: ON | ||||
|       CTEST_PARALLEL_LEVEL: 2 | ||||
|       CMAKE_BUILD_TYPE: ${{ matrix.build_type }} | ||||
|       PYTHON_VERSION: ${{ matrix.python_version }} | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         # Github Actions requires a single row to be added to the build matrix. | ||||
|         # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. | ||||
|         name: [ | ||||
|           ubuntu-18.04-gcc-5, | ||||
|           ubuntu-18.04-gcc-9, | ||||
|           ubuntu-18.04-clang-9, | ||||
|           macOS-10.15-xcode-11.3.1, | ||||
|           ubuntu-18.04-gcc-5-tbb, | ||||
|         ] | ||||
| 
 | ||||
|         build_type: [Debug, Release] | ||||
|         python_version: [3] | ||||
|         include: | ||||
|           - name: ubuntu-18.04-gcc-5 | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "5" | ||||
| 
 | ||||
|           - name: ubuntu-18.04-gcc-9 | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "9" | ||||
| 
 | ||||
|           - name: ubuntu-18.04-clang-9 | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: clang | ||||
|             version: "9" | ||||
| 
 | ||||
|           - name: macOS-10.15-xcode-11.3.1 | ||||
|             os: macOS-10.15 | ||||
|             compiler: xcode | ||||
|             version: "11.3.1" | ||||
| 
 | ||||
|           - name: ubuntu-18.04-gcc-5-tbb | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "5" | ||||
|             flag: tbb | ||||
| 
 | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@master | ||||
|       - name: Install (Linux) | ||||
|         if: runner.os == 'Linux' | ||||
|         run: | | ||||
|           if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then | ||||
|             # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool | ||||
|             # ipv4 avoids potential timeouts because of crappy IPv6 infrastructure | ||||
|             # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository | ||||
|             # This key is not in the keystore by default for Ubuntu so we need to add it. | ||||
|             LLVM_KEY=15CF4D18AF4F7421 | ||||
|             gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY | ||||
|             gpg -a --export $LLVM_KEY | sudo apt-key add - | ||||
|             sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" | ||||
|           fi | ||||
|           sudo apt-get -y update | ||||
|            | ||||
|           sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev | ||||
|            | ||||
|           if [ "${{ matrix.compiler }}" = "gcc" ]; then | ||||
|             sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib | ||||
|             echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           else | ||||
|             sudo apt-get install -y clang-${{ matrix.version }} g++-multilib | ||||
|             echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           fi | ||||
|       - name: Install (macOS) | ||||
|         if: runner.os == 'macOS' | ||||
|         run: | | ||||
|           brew tap ProfFan/robotics | ||||
|           brew install cmake ninja | ||||
|           brew install ProfFan/robotics/boost | ||||
|           if [ "${{ matrix.compiler }}" = "gcc" ]; then | ||||
|             brew install gcc@${{ matrix.version }} | ||||
|             echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           else | ||||
|             sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app | ||||
|             echo "CC=clang" >> $GITHUB_ENV | ||||
|             echo "CXX=clang++" >> $GITHUB_ENV | ||||
|           fi | ||||
|       - name: Set GTSAM_WITH_TBB Flag | ||||
|         if: matrix.flag == 'tbb' | ||||
|         run: | | ||||
|           echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV | ||||
|           echo "GTSAM Uses TBB" | ||||
|       - name: Build (Linux) | ||||
|         if: runner.os == 'Linux' | ||||
|         run: | | ||||
|           bash .github/scripts/python.sh | ||||
|       - name: Build (macOS) | ||||
|         if: runner.os == 'macOS' | ||||
|         run: | | ||||
|           bash .github/scripts/python.sh | ||||
@@ -0,0 +1,128 @@ (new file)
| name: Special Cases CI | ||||
| 
 | ||||
| on: [pull_request] | ||||
| 
 | ||||
| jobs: | ||||
|   build: | ||||
|     name: ${{ matrix.name }} ${{ matrix.build_type }} | ||||
|     runs-on: ${{ matrix.os }} | ||||
| 
 | ||||
|     env: | ||||
|       CTEST_OUTPUT_ON_FAILURE: ON | ||||
|       CTEST_PARALLEL_LEVEL: 2 | ||||
|       CMAKE_BUILD_TYPE: ${{ matrix.build_type }} | ||||
|       GTSAM_BUILD_UNSTABLE: ON | ||||
| 
 | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
| 
 | ||||
|       matrix: | ||||
|         # Github Actions requires a single row to be added to the build matrix. | ||||
|         # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. | ||||
|         name: | ||||
|           [ | ||||
|             ubuntu-gcc-deprecated, | ||||
|             ubuntu-gcc-quaternions, | ||||
|             ubuntu-gcc-tbb, | ||||
|             ubuntu-cayleymap, | ||||
|           ] | ||||
| 
 | ||||
|         build_type: [Debug, Release] | ||||
| 
 | ||||
|         include: | ||||
|           - name: ubuntu-gcc-deprecated | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "9" | ||||
|             flag: deprecated | ||||
| 
 | ||||
|           - name: ubuntu-gcc-quaternions | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "9" | ||||
|             flag: quaternions | ||||
| 
 | ||||
|           - name: ubuntu-gcc-tbb | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "9" | ||||
|             flag: tbb | ||||
| 
 | ||||
|           - name: ubuntu-cayleymap | ||||
|             os: ubuntu-18.04 | ||||
|             compiler: gcc | ||||
|             version: "9" | ||||
|             flag: cayley | ||||
| 
 | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@master | ||||
| 
 | ||||
|       - name: Install (Linux) | ||||
|         if: runner.os == 'Linux' | ||||
|         run: | | ||||
|           # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. | ||||
|           if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then | ||||
|             gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 | ||||
|             gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - | ||||
|             sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" | ||||
|           fi | ||||
|           sudo apt-get -y update | ||||
| 
 | ||||
|           sudo apt install cmake build-essential pkg-config libpython-dev python-numpy | ||||
| 
 | ||||
|           echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV | ||||
|           echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV | ||||
| 
 | ||||
|           if [ "${{ matrix.compiler }}" = "gcc" ]; then | ||||
|             sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib | ||||
|             echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           else | ||||
|             sudo apt-get install -y clang-${{ matrix.version }} g++-multilib | ||||
|             echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           fi | ||||
| 
 | ||||
|       - name: Install (macOS) | ||||
|         if: runner.os == 'macOS' | ||||
|         run: | | ||||
|           brew install cmake ninja boost | ||||
|           if [ "${{ matrix.compiler }}" = "gcc" ]; then | ||||
|             brew install gcc@${{ matrix.version }} | ||||
|             echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|             echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV | ||||
|           else | ||||
|             sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app | ||||
|             echo "CC=clang" >> $GITHUB_ENV | ||||
|             echo "CXX=clang++" >> $GITHUB_ENV | ||||
|             fi | ||||
| 
 | ||||
|       - name: Set Allow Deprecated Flag | ||||
|         if: matrix.flag == 'deprecated' | ||||
|         run: | | ||||
|           echo "GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON" >> $GITHUB_ENV | ||||
|           echo "Allow deprecated since version 4.1" | ||||
| 
 | ||||
|       - name: Set Use Quaternions Flag | ||||
|         if: matrix.flag == 'quaternions' | ||||
|         run: | | ||||
|           echo "GTSAM_USE_QUATERNIONS=ON" >> $GITHUB_ENV | ||||
|           echo "Use Quaternions for rotations" | ||||
| 
 | ||||
|       - name: Set GTSAM_WITH_TBB Flag | ||||
|         if: matrix.flag == 'tbb' | ||||
|         run: | | ||||
|           echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV | ||||
|           echo "GTSAM Uses TBB" | ||||
| 
 | ||||
|       - name: Use Cayley Transform for Rot3 | ||||
|         if: matrix.flag == 'cayley' | ||||
|         run: | | ||||
|           echo "GTSAM_POSE3_EXPMAP=OFF" >> $GITHUB_ENV | ||||
|           echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV | ||||
|           echo "GTSAM Uses Cayley map for Rot3" | ||||
| 
 | ||||
|       - name: Build & Test | ||||
|         run: | | ||||
|           bash .github/scripts/unix.sh -t | ||||
@@ -0,0 +1,84 @@ (new file)
| name: Windows CI | ||||
| 
 | ||||
| on: [pull_request] | ||||
| 
 | ||||
| jobs: | ||||
|   build: | ||||
|     name: ${{ matrix.name }} ${{ matrix.build_type }} | ||||
|     runs-on: ${{ matrix.os }} | ||||
| 
 | ||||
|     env: | ||||
|       CTEST_OUTPUT_ON_FAILURE: ON | ||||
|       CTEST_PARALLEL_LEVEL: 2 | ||||
|       CMAKE_BUILD_TYPE: ${{ matrix.build_type }} | ||||
|       GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         # Github Actions requires a single row to be added to the build matrix. | ||||
|         # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. | ||||
|         name: [ | ||||
|           #TODO This build keeps timing out, need to understand why. | ||||
|           # windows-2016-cl, | ||||
|           windows-2019-cl, | ||||
|         ] | ||||
| 
 | ||||
|         build_type: [Debug, Release] | ||||
|         build_unstable: [ON] | ||||
|         include: | ||||
| 
 | ||||
|           #TODO This build keeps timing out, need to understand why. | ||||
|           # - name: windows-2016-cl | ||||
|           #   os: windows-2016 | ||||
|           #   compiler: cl | ||||
| 
 | ||||
|           - name: windows-2019-cl | ||||
|             os: windows-2019 | ||||
|             compiler: cl | ||||
| 
 | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@master | ||||
|       - name: Install (Windows) | ||||
|         if: runner.os == 'Windows' | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh') | ||||
|           scoop install ninja --global | ||||
|           if ("${{ matrix.compiler }}".StartsWith("clang")) { | ||||
|             scoop install llvm --global | ||||
|           } | ||||
|           if ("${{ matrix.compiler }}" -eq "gcc") { | ||||
|             # Chocolatey GCC is broken on the windows-2019 image. | ||||
|             # See: https://github.com/DaanDeMeyer/doctest/runs/231595515 | ||||
|             # See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413 | ||||
|             scoop install gcc --global | ||||
|             echo "CC=gcc" >> $GITHUB_ENV | ||||
|             echo "CXX=g++" >> $GITHUB_ENV | ||||
|           } elseif ("${{ matrix.compiler }}" -eq "clang") { | ||||
|             echo "CC=clang" >> $GITHUB_ENV | ||||
|             echo "CXX=clang++" >> $GITHUB_ENV | ||||
|           } else { | ||||
|             echo "CC=${{ matrix.compiler }}" >> $GITHUB_ENV | ||||
|             echo "CXX=${{ matrix.compiler }}" >> $GITHUB_ENV | ||||
|           } | ||||
|           # Scoop modifies the PATH so we make the modified PATH global. | ||||
|           echo "$env:PATH" >> $GITHUB_PATH | ||||
|       - name: Build (Windows) | ||||
|         if: runner.os == 'Windows' | ||||
|         run: | | ||||
|           cmake -E remove_directory build | ||||
|           echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}" | ||||
|           cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib" | ||||
|           cmake --build build --config ${{ matrix.build_type }} --target gtsam | ||||
|           cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable | ||||
|           cmake --build build --config ${{ matrix.build_type }} --target wrap | ||||
|           cmake --build build --config ${{ matrix.build_type }} --target check.base | ||||
|           cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable | ||||
|           cmake --build build --config ${{ matrix.build_type }} --target check.linear | ||||
|       - name: Upload build directory | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         if: matrix.build_type == 'Release' | ||||
|         with: | ||||
|           name: gtsam-${{ matrix.name }}-${{ matrix.build_type }} | ||||
|           path: ${{ github.workspace }}/build/ | ||||
@@ -0,0 +1,17 @@ (new file)
| # This triggers Python builds on `gtsam-manylinux-build` | ||||
| name: Trigger Python Builds | ||||
| on: | ||||
|   push: | ||||
|     branches: | ||||
|       - develop | ||||
| jobs: | ||||
|   triggerPython: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: Repository Dispatch | ||||
|         uses: ProfFan/repository-dispatch@master | ||||
|         with: | ||||
|           token: ${{ secrets.PYTHON_CI_REPO_ACCESS_TOKEN }} | ||||
|           repository: borglab/gtsam-manylinux-build | ||||
|           event-type: python-wrapper | ||||
|           client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' | ||||
@@ -9,15 +9,10 @@
| *.txt.user | ||||
| *.txt.user.6d59f0c | ||||
| *.pydevproject | ||||
| cython/venv | ||||
| cython/gtsam.cpp | ||||
| cython/gtsam.cpython-35m-darwin.so | ||||
| cython/gtsam.pyx | ||||
| cython/gtsam.so | ||||
| cython/gtsam_wrapper.pxd | ||||
| .vscode | ||||
| .env | ||||
| /.vs/ | ||||
| /CMakeSettings.json | ||||
| # for QtCreator: | ||||
| CMakeLists.txt.user* | ||||
| xcode/ | ||||
							
								
								
									
.travis.sh (85 changes; file deleted)
@@ -1,85 +0,0 @@
| #!/bin/bash | ||||
| 
 | ||||
| # common tasks before either build or test | ||||
| function prepare () | ||||
| { | ||||
|   set -e   # Make sure any error makes the script to return an error code | ||||
|   set -x   # echo | ||||
| 
 | ||||
|   SOURCE_DIR=`pwd` | ||||
|   BUILD_DIR=build | ||||
| 
 | ||||
|   #env | ||||
|   git clean -fd || true | ||||
|   rm -fr $BUILD_DIR || true | ||||
|   mkdir $BUILD_DIR && cd $BUILD_DIR | ||||
| 
 | ||||
|   if [ -z "$CMAKE_BUILD_TYPE" ]; then | ||||
|     CMAKE_BUILD_TYPE=Debug | ||||
|   fi | ||||
| 
 | ||||
|   if [ -z "$GTSAM_ALLOW_DEPRECATED_SINCE_V4" ]; then | ||||
|     GTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF | ||||
|   fi | ||||
| 
 | ||||
|   if [ ! -z "$GCC_VERSION" ]; then | ||||
|     sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 60 \ | ||||
|                          --slave /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION | ||||
|     sudo update-alternatives --set gcc /usr/bin/gcc-$GCC_VERSION | ||||
|   fi | ||||
| } | ||||
| 
 | ||||
| # common tasks after either build or test | ||||
| function finish () | ||||
| { | ||||
|   # Print ccache stats | ||||
|   ccache -s | ||||
| 
 | ||||
|   cd $SOURCE_DIR | ||||
| } | ||||
| 
 | ||||
| # compile the code with the intent of populating the cache | ||||
| function build () | ||||
| { | ||||
|   prepare | ||||
| 
 | ||||
|   cmake $SOURCE_DIR \ | ||||
|       -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \ | ||||
|       -DGTSAM_BUILD_TESTS=OFF \ | ||||
|       -DGTSAM_BUILD_UNSTABLE=$GTSAM_BUILD_UNSTABLE \ | ||||
|       -DGTSAM_BUILD_EXAMPLES_ALWAYS=ON \ | ||||
|       -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=$GTSAM_ALLOW_DEPRECATED_SINCE_V4 | ||||
| 
 | ||||
|   # Actual build: | ||||
|   VERBOSE=1 make -j2 | ||||
| 
 | ||||
|   finish | ||||
| } | ||||
| 
 | ||||
| # run the tests | ||||
| function test () | ||||
| { | ||||
|   prepare | ||||
| 
 | ||||
|   cmake $SOURCE_DIR \ | ||||
|       -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \ | ||||
|       -DGTSAM_BUILD_TESTS=ON \ | ||||
|       -DGTSAM_BUILD_UNSTABLE=$GTSAM_BUILD_UNSTABLE \ | ||||
|       -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ | ||||
|       -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF | ||||
| 
 | ||||
|   # Actual build: | ||||
|   make -j2 check | ||||
| 
 | ||||
|   finish | ||||
| } | ||||
| 
 | ||||
| # select between build or test | ||||
| case $1 in | ||||
|   -b) | ||||
|     build | ||||
|     ;; | ||||
|   -t)                       | ||||
|     test | ||||
|     ;; | ||||
| esac | ||||
							
								
								
									
.travis.yml (113 changes; file deleted)
@@ -1,113 +0,0 @@
| language: cpp | ||||
| cache: ccache | ||||
| sudo: required | ||||
| dist: xenial | ||||
| 
 | ||||
| addons: | ||||
|   apt: | ||||
|     sources: | ||||
|     - ubuntu-toolchain-r-test | ||||
|     packages: | ||||
|     - g++-8 | ||||
|     - clang-3.8 | ||||
|     - build-essential | ||||
|     - pkg-config | ||||
|     - cmake | ||||
|     - libpython-dev python-numpy | ||||
|     - libboost-all-dev | ||||
| 
 | ||||
| # before_install: | ||||
| #  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update          ; fi | ||||
| 
 | ||||
| install: | ||||
|   - if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache  ; fi | ||||
|   - if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi | ||||
| 
 | ||||
| # We first do the compile stage specified below, then the matrix expansion specified after. | ||||
| stages: | ||||
|   - compile | ||||
|   - test | ||||
| 
 | ||||
| # Compile stage without building examples/tests to populate the caches. | ||||
| jobs: | ||||
|   include: | ||||
| # on Mac, GCC | ||||
|   - stage: compile | ||||
|     os: osx | ||||
|     compiler: gcc | ||||
|     env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF | ||||
|     script: bash .travis.sh -b | ||||
|   - stage: compile | ||||
|     os: osx | ||||
|     compiler: gcc | ||||
|     env: CMAKE_BUILD_TYPE=Release | ||||
|     script: bash .travis.sh -b | ||||
| # on Mac, CLANG | ||||
|   - stage: compile | ||||
|     os: osx | ||||
|     compiler: clang | ||||
|     env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF | ||||
|     script: bash .travis.sh -b | ||||
|   - stage: compile | ||||
|     os: osx | ||||
|     compiler: clang | ||||
|     env: CMAKE_BUILD_TYPE=Release | ||||
|     script: bash .travis.sh -b | ||||
| # on Linux, GCC | ||||
|   - stage: compile | ||||
|     os: linux | ||||
|     compiler: gcc | ||||
|     env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF | ||||
|     script: bash .travis.sh -b | ||||
|   - stage: compile | ||||
|     os: linux | ||||
|     compiler: gcc | ||||
|     env: CMAKE_BUILD_TYPE=Release | ||||
|     script: bash .travis.sh -b | ||||
| # on Linux, CLANG | ||||
|   - stage: compile | ||||
|     os: linux | ||||
|     compiler: clang | ||||
|     env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF | ||||
|     script: bash .travis.sh -b | ||||
|   - stage: compile | ||||
|     os: linux | ||||
|     compiler: clang | ||||
|     env: CMAKE_BUILD_TYPE=Release | ||||
|     script: bash .travis.sh -b | ||||
| # on Linux, with deprecated ON to make sure that path still compiles | ||||
|   - stage: compile | ||||
|     os: linux | ||||
|     compiler: clang | ||||
|     env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON | ||||
|     script: bash .travis.sh -b | ||||
| 
 | ||||
| # Matrix configuration: | ||||
| os: | ||||
|   - osx | ||||
|   - linux | ||||
| compiler: | ||||
|   - gcc | ||||
|   - clang | ||||
| env: | ||||
|   global: | ||||
|     - MAKEFLAGS="-j2" | ||||
|     - CCACHE_SLOPPINESS=pch_defines,time_macros | ||||
|     - GTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF | ||||
|     - GTSAM_BUILD_UNSTABLE=ON | ||||
|   matrix: | ||||
|     - CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF | ||||
|     - CMAKE_BUILD_TYPE=Release | ||||
| script: | ||||
|   - bash .travis.sh -t | ||||
| 
 | ||||
| matrix: | ||||
|   exclude: | ||||
|     # Exclude g++ debug on Linux as it consistently times out | ||||
|     - os: linux | ||||
|       compiler: gcc | ||||
|       env : CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF | ||||
|     # Exclude clang on Linux/clang in release until issue #57 is solved | ||||
|     - os: linux | ||||
|       compiler: clang | ||||
|       env : CMAKE_BUILD_TYPE=Release | ||||
							
								
								
									
CMakeLists.txt (571 changes)
@@ -9,23 +9,22 @@ endif()
| 
 | ||||
| # Set the version number for the library | ||||
| set (GTSAM_VERSION_MAJOR 4) | ||||
| set (GTSAM_VERSION_MINOR 0) | ||||
| set (GTSAM_VERSION_PATCH 2) | ||||
| set (GTSAM_VERSION_MINOR 1) | ||||
| set (GTSAM_VERSION_PATCH 0) | ||||
| math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}") | ||||
| set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") | ||||
| 
 | ||||
| set (CMAKE_PROJECT_VERSION ${GTSAM_VERSION_STRING}) | ||||
| set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) | ||||
| set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR}) | ||||
| set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH}) | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Gather information, perform checks, set defaults | ||||
| 
 | ||||
| # Set the default install path to home | ||||
| #set (CMAKE_INSTALL_PREFIX ${HOME} CACHE PATH "Install prefix for library") | ||||
| 
 | ||||
| set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake") | ||||
| include(GtsamMakeConfigFile) | ||||
| 
 | ||||
| # Record the root dir for gtsam - needed during external builds, e.g., ROS | ||||
| set(GTSAM_SOURCE_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}) | ||||
| message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]") | ||||
| include(GNUInstallDirs) | ||||
| 
 | ||||
| # Load build type flags and default to Debug mode | ||||
| include(GtsamBuildTypes) | ||||
@@ -39,391 +38,21 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
|   message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ") | ||||
| endif() | ||||
| 
 | ||||
| # See whether gtsam_unstable is available (it will be present only if we're using a git checkout) | ||||
| if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable") | ||||
|     set(GTSAM_UNSTABLE_AVAILABLE 1) | ||||
| else() | ||||
|     set(GTSAM_UNSTABLE_AVAILABLE 0) | ||||
| endif() | ||||
| include(cmake/HandleBoost.cmake)            # Boost | ||||
| include(cmake/HandleCCache.cmake)           # ccache | ||||
| include(cmake/HandleCPack.cmake)            # CPack | ||||
| include(cmake/HandleEigen.cmake)            # Eigen3 | ||||
| include(cmake/HandleGeneralOptions.cmake)  # CMake build options | ||||
| include(cmake/HandleMKL.cmake)              # MKL | ||||
| include(cmake/HandleOpenMP.cmake)           # OpenMP | ||||
| include(cmake/HandlePerfTools.cmake)        # Google perftools | ||||
| include(cmake/HandlePython.cmake)           # Python options and commands | ||||
| include(cmake/HandleTBB.cmake)              # TBB | ||||
| include(cmake/HandleUninstall.cmake)        # for "make uninstall" | ||||
| 
 | ||||
| # ---------------------------------------------------------------------------- | ||||
| #   Uninstall target, for "make uninstall" | ||||
| # ---------------------------------------------------------------------------- | ||||
| configure_file( | ||||
|   "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in" | ||||
|   "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" | ||||
|   IMMEDIATE @ONLY) | ||||
| include(cmake/HandleAllocators.cmake)       # Must be after tbb, perftools | ||||
| 
 | ||||
| add_custom_target(uninstall | ||||
|   "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake") | ||||
| 
 | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Set up options | ||||
| 
 | ||||
| # Configurable Options | ||||
| if(GTSAM_UNSTABLE_AVAILABLE) | ||||
|     option(GTSAM_BUILD_UNSTABLE              "Enable/Disable libgtsam_unstable"          ON) | ||||
| endif() | ||||
| option(BUILD_SHARED_LIBS                 "Build shared gtsam library, instead of static" ON) | ||||
| option(GTSAM_USE_QUATERNIONS             "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) | ||||
| option(GTSAM_POSE3_EXPMAP 			 	 "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." OFF) | ||||
| option(GTSAM_ROT3_EXPMAP 			 	 "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." OFF) | ||||
| option(GTSAM_ENABLE_CONSISTENCY_CHECKS   "Enable/Disable expensive consistency checks"       OFF) | ||||
| option(GTSAM_WITH_TBB                    "Use Intel Threaded Building Blocks (TBB) if available" ON) | ||||
| option(GTSAM_WITH_EIGEN_MKL              "Eigen will use Intel MKL if available" OFF) | ||||
| option(GTSAM_WITH_EIGEN_MKL_OPENMP       "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) | ||||
| option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) | ||||
| option(GTSAM_ALLOW_DEPRECATED_SINCE_V4   "Allow use of methods/functions deprecated in GTSAM 4" ON) | ||||
| option(GTSAM_TYPEDEF_POINTS_TO_VECTORS   "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF) | ||||
| option(GTSAM_SUPPORT_NESTED_DISSECTION   "Support Metis-based nested dissection" ON) | ||||
| option(GTSAM_TANGENT_PREINTEGRATION      "Use new ImuFactor with integration on tangent space" ON) | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|     option(GTSAM_BUILD_WITH_CCACHE           "Use ccache compiler cache" ON) | ||||
| endif() | ||||
| 
 | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|   # Set the build type to upper case for downstream use | ||||
|   string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER) | ||||
| 
 | ||||
|   # Set the GTSAM_BUILD_TAG variable. | ||||
|   # If build type is Release, set to blank (""), else set to the build type. | ||||
|   if("${CMAKE_BUILD_TYPE_UPPER}" STREQUAL "RELEASE") | ||||
|    set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory | ||||
|   else() | ||||
|    set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}") | ||||
|   endif() | ||||
| endif() | ||||
| 
 | ||||
| # Options relating to MATLAB wrapper | ||||
| # TODO: Check for matlab mex binary before handling building of binaries | ||||
| option(GTSAM_INSTALL_MATLAB_TOOLBOX      "Enable/Disable installation of matlab toolbox"  OFF) | ||||
| option(GTSAM_INSTALL_CYTHON_TOOLBOX      "Enable/Disable installation of Cython toolbox"  OFF) | ||||
| option(GTSAM_BUILD_WRAP                  "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON) | ||||
| set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper for (or Default)") | ||||
| 
 | ||||
| # Check / set dependent variables for MATLAB wrapper | ||||
| if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP) | ||||
| 	message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP") | ||||
| endif() | ||||
| if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES) | ||||
| 		set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) | ||||
| endif() | ||||
| if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP) | ||||
| 	message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP") | ||||
| endif() | ||||
| 
 | ||||
| if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS) | ||||
| 	message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries.  If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") | ||||
| endif() | ||||
| 
 | ||||
| if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) | ||||
|     message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet.  Please turn one of the two options off.") | ||||
| endif() | ||||
| 
 | ||||
| if(GTSAM_INSTALL_CYTHON_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) | ||||
|     message(FATAL_ERROR "GTSAM_INSTALL_CYTHON_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the CYTHON toolbox cannot deal with this yet.  Please turn one of the two options off.") | ||||
| endif() | ||||
| 
 | ||||
| # Flags for choosing default packaging tools | ||||
| set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") | ||||
| set(CPACK_GENERATOR        "TGZ" CACHE STRING "CPack Default Binary Generator") | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Find boost | ||||
| 
 | ||||
| # To change the path for boost, you will need to set: | ||||
| # BOOST_ROOT: path to install prefix for boost | ||||
| # Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT | ||||
| 
 | ||||
| if(MSVC) | ||||
| 	# By default, boost only builds static libraries on windows | ||||
| 	set(Boost_USE_STATIC_LIBS ON)  # only find static libs | ||||
| 	# If we ever reset above on windows and, ... | ||||
| 	# If we use Boost shared libs, disable auto linking. | ||||
| 	# Some libraries, at least Boost Program Options, rely on this to export DLL symbols. | ||||
| 	if(NOT Boost_USE_STATIC_LIBS) | ||||
| 		list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) | ||||
| 	endif() | ||||
| 	# Virtual memory range for PCH exceeded on VS2015 | ||||
| 	if(MSVC_VERSION LESS 1910) # older than VS2017 | ||||
| 	  list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) | ||||
| 	endif() | ||||
| endif() | ||||
| 
 | ||||
| # If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() | ||||
| # or explicit instantiation will generate build errors. | ||||
| # See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 | ||||
| # | ||||
| if(MSVC AND BUILD_SHARED_LIBS) | ||||
| 	list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) | ||||
| endif() | ||||
| 
 | ||||
| # Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. | ||||
| set(BOOST_FIND_MINIMUM_VERSION 1.43) | ||||
| set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex) | ||||
| 
 | ||||
| find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS}) | ||||
| 
 | ||||
| # Required components | ||||
| if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR | ||||
|     NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY) | ||||
|   message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.") | ||||
| endif() | ||||
| 
 | ||||
| # Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library) | ||||
| option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF) | ||||
| 
 | ||||
| # JLBC: This was once updated to target-based names (Boost::xxx), but it caused | ||||
| # problems with Boost versions newer than FindBoost.cmake was prepared to handle, | ||||
| # so we downgraded this to classic filenames-based variables, and manually adding | ||||
| # the target_include_directories(xxx ${Boost_INCLUDE_DIR}) | ||||
| set(GTSAM_BOOST_LIBRARIES | ||||
|   optimized ${Boost_SERIALIZATION_LIBRARY_RELEASE} | ||||
|   optimized ${Boost_SYSTEM_LIBRARY_RELEASE} | ||||
|   optimized ${Boost_FILESYSTEM_LIBRARY_RELEASE} | ||||
|   optimized ${Boost_THREAD_LIBRARY_RELEASE} | ||||
|   optimized ${Boost_DATE_TIME_LIBRARY_RELEASE} | ||||
|   optimized ${Boost_REGEX_LIBRARY_RELEASE} | ||||
|   debug ${Boost_SERIALIZATION_LIBRARY_DEBUG} | ||||
|   debug ${Boost_SYSTEM_LIBRARY_DEBUG} | ||||
|   debug ${Boost_FILESYSTEM_LIBRARY_DEBUG} | ||||
|   debug ${Boost_THREAD_LIBRARY_DEBUG} | ||||
|   debug ${Boost_DATE_TIME_LIBRARY_DEBUG} | ||||
|   debug ${Boost_REGEX_LIBRARY_DEBUG} | ||||
| ) | ||||
| message(STATUS "GTSAM_BOOST_LIBRARIES: ${GTSAM_BOOST_LIBRARIES}") | ||||
| if (GTSAM_DISABLE_NEW_TIMERS) | ||||
|     message("WARNING:  GTSAM timing instrumentation manually disabled") | ||||
|     list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS) | ||||
| else() | ||||
|     if(Boost_TIMER_LIBRARY) | ||||
|       list(APPEND GTSAM_BOOST_LIBRARIES | ||||
|         optimized ${Boost_TIMER_LIBRARY_RELEASE} | ||||
|         optimized ${Boost_CHRONO_LIBRARY_RELEASE} | ||||
|         debug ${Boost_TIMER_LIBRARY_DEBUG} | ||||
|         debug ${Boost_CHRONO_LIBRARY_DEBUG} | ||||
|         ) | ||||
|     else() | ||||
|       list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt | ||||
|       message("WARNING:  GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.") | ||||
|     endif() | ||||
| endif() | ||||
| 
 | ||||
| 
 | ||||
| if(NOT (${Boost_VERSION} LESS 105600)) | ||||
| 	message("Ignoring Boost restriction on optional lvalue assignment from rvalues") | ||||
| 	list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES BOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES) | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Find TBB | ||||
| find_package(TBB 4.4 COMPONENTS tbb tbbmalloc) | ||||
| 
 | ||||
| # Set up variables if we're using TBB | ||||
| if(TBB_FOUND AND GTSAM_WITH_TBB) | ||||
| 	set(GTSAM_USE_TBB 1)  # This will go into config.h | ||||
|   # all definitions and link requisites will go via imported targets: | ||||
|   # tbb & tbbmalloc | ||||
|   list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc) | ||||
| else() | ||||
| 	set(GTSAM_USE_TBB 0)  # This will go into config.h | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Prohibit Timing build mode in combination with TBB | ||||
| if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE  STREQUAL "Timing")) | ||||
|       message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.") | ||||
| endif() | ||||
| 
 | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Find Google perftools | ||||
| find_package(GooglePerfTools) | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Support ccache, if installed | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
| 	find_program(CCACHE_FOUND ccache) | ||||
| 	if(CCACHE_FOUND) | ||||
| 		if(GTSAM_BUILD_WITH_CCACHE) | ||||
| 			set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) | ||||
| 			set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) | ||||
| 		else() | ||||
| 			set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") | ||||
| 			set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") | ||||
| 		endif() | ||||
| 	endif(CCACHE_FOUND) | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Find MKL | ||||
| find_package(MKL) | ||||
| 
 | ||||
| if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL) | ||||
|     set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h | ||||
|     set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL | ||||
|     list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES}) | ||||
| 
 | ||||
|     # --no-as-needed is required with gcc according to the MKL link advisor | ||||
|     if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") | ||||
|         set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed") | ||||
|     endif() | ||||
| else() | ||||
|     set(GTSAM_USE_EIGEN_MKL 0) | ||||
|     set(EIGEN_USE_MKL_ALL 0) | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Find OpenMP (if we're also using MKL) | ||||
| find_package(OpenMP)  # do this here to generate correct message if disabled | ||||
| 
 | ||||
| if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) | ||||
|     if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP) | ||||
|         set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h | ||||
|         list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS}) | ||||
|     endif() | ||||
| endif() | ||||
| 
 | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Option for using system Eigen or GTSAM-bundled Eigen | ||||
| ### These patches only affect usage of MKL. If you want to enable MKL, you *must* | ||||
| ### use our patched version of Eigen | ||||
| ### See:  http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection) | ||||
| ###       http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code) | ||||
| option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF) | ||||
| option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) | ||||
| 
 | ||||
| # Switch for using system Eigen or GTSAM-bundled Eigen | ||||
| if(GTSAM_USE_SYSTEM_EIGEN) | ||||
| 	find_package(Eigen3 REQUIRED) | ||||
| 
 | ||||
| 	# Use generic Eigen include paths e.g. <Eigen/Core> | ||||
| 	set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") | ||||
| 
 | ||||
| 	# check if MKL is also enabled - can have one or the other, but not both! | ||||
| 	# Note: Eigen >= v3.2.5 includes our patches | ||||
| 	if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) | ||||
| 	  message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") | ||||
| 	endif() | ||||
| 
 | ||||
| 	# Check for Eigen version which doesn't work with MKL | ||||
| 	# See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. | ||||
| 	if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) | ||||
| 		message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") | ||||
| 	endif() | ||||
| 
 | ||||
| 	# The actual include directory (for BUILD cmake target interface): | ||||
| 	set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") | ||||
| else() | ||||
| 	# Use bundled Eigen include path. | ||||
| 	# Clear any variables set by FindEigen3 | ||||
| 	if(EIGEN3_INCLUDE_DIR) | ||||
| 		set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) | ||||
| 	endif() | ||||
| 
 | ||||
| 	# set full path to be used by external projects | ||||
| 	# this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in | ||||
| 	set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") | ||||
| 
 | ||||
| 	# The actual include directory (for BUILD cmake target interface): | ||||
| 	set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") | ||||
| endif() | ||||
| 
 | ||||
| # Detect Eigen version: | ||||
| set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") | ||||
| if (EXISTS ${EIGEN_VER_H}) | ||||
| 	file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) | ||||
| 
 | ||||
| 	# Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION  XX", etc... | ||||
| 
 | ||||
| 	string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") | ||||
| 	string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") | ||||
| 
 | ||||
| 	string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") | ||||
| 	string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") | ||||
| 
 | ||||
| 	string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") | ||||
| 	string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") | ||||
| 
 | ||||
| 	set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") | ||||
| 
 | ||||
| 	message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") | ||||
| else() | ||||
| 	message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") | ||||
| endif () | ||||
| 
 | ||||
| if (MSVC) | ||||
| 	if (BUILD_SHARED_LIBS) | ||||
| 		# mute eigen static assert to avoid errors in shared lib | ||||
| 		list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) | ||||
| 	endif() | ||||
| 	list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Global compile options | ||||
| 
 | ||||
| # Build list of possible allocators | ||||
| set(possible_allocators "") | ||||
| if(GTSAM_USE_TBB) | ||||
| 	list(APPEND possible_allocators TBB) | ||||
| 	set(preferred_allocator TBB) | ||||
| else() | ||||
| 	list(APPEND possible_allocators BoostPool STL) | ||||
| 	set(preferred_allocator STL) | ||||
| endif() | ||||
| if(GOOGLE_PERFTOOLS_FOUND) | ||||
| 	list(APPEND possible_allocators tcmalloc) | ||||
| endif() | ||||
| 
 | ||||
| # Check if current allocator choice is valid and set cache option | ||||
| list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) | ||||
| if(allocator_valid EQUAL -1) | ||||
| 	set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) | ||||
| else() | ||||
| 	set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") | ||||
| endif() | ||||
| set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) | ||||
| mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) | ||||
| 
 | ||||
| # Define compile flags depending on allocator | ||||
| if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") | ||||
| 	set(GTSAM_ALLOCATOR_BOOSTPOOL 1) | ||||
| elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") | ||||
| 	set(GTSAM_ALLOCATOR_STL 1) | ||||
| elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") | ||||
| 	set(GTSAM_ALLOCATOR_TBB 1) | ||||
| elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") | ||||
| 	set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator | ||||
| 	list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") | ||||
| endif() | ||||
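# Example (usage sketch, not part of the original logic): the allocator is an
# ordinary cache option and can be chosen at configure time, e.g.
#   cmake -DGTSAM_DEFAULT_ALLOCATOR=tcmalloc ..
# tcmalloc is only offered when Google perftools was found; with GTSAM_USE_TBB=ON
# the TBB allocator is preferred by default, otherwise STL.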
| 
 | ||||
| if(MSVC) | ||||
| 	list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) | ||||
| 	list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings | ||||
| 	list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code | ||||
| endif() | ||||
| 
 | ||||
| # GCC 4.8+ complains about local typedefs which we use for shared_ptr etc. | ||||
| if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") | ||||
|   if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8) | ||||
|     list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) | ||||
|   endif() | ||||
| endif() | ||||
| 
 | ||||
| # As of XCode 7, clang also complains about this | ||||
| if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") | ||||
|   if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) | ||||
|     list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) | ||||
|   endif() | ||||
| endif() | ||||
| 
 | ||||
| if(GTSAM_ENABLE_CONSISTENCY_CHECKS) | ||||
|   # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h | ||||
|   list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS) | ||||
| endif() | ||||
| include(cmake/HandleGlobalBuildFlags.cmake) # Build flags | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Add components | ||||
|  | @ -431,10 +60,11 @@ endif() | |||
| # Build CppUnitLite | ||||
| add_subdirectory(CppUnitLite) | ||||
| 
 | ||||
| # Build wrap | ||||
| if (GTSAM_BUILD_WRAP) | ||||
|     add_subdirectory(wrap) | ||||
| endif(GTSAM_BUILD_WRAP) | ||||
| # This is the new wrapper | ||||
| if(GTSAM_BUILD_PYTHON) | ||||
|     list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake") | ||||
|     add_subdirectory(python) | ||||
| endif() | ||||
| 
 | ||||
| # Build GTSAM library | ||||
| add_subdirectory(gtsam) | ||||
|  | @ -451,33 +81,17 @@ add_subdirectory(timing) | |||
| # Build gtsam_unstable | ||||
| if (GTSAM_BUILD_UNSTABLE) | ||||
|     add_subdirectory(gtsam_unstable) | ||||
| endif(GTSAM_BUILD_UNSTABLE) | ||||
| endif() | ||||
| 
 | ||||
| # Matlab toolbox | ||||
| if (GTSAM_INSTALL_MATLAB_TOOLBOX) | ||||
| 	add_subdirectory(matlab) | ||||
|     add_subdirectory(matlab) | ||||
| endif() | ||||
| 
 | ||||
| # Cython wrap | ||||
| if (GTSAM_INSTALL_CYTHON_TOOLBOX) | ||||
|   set(GTSAM_INSTALL_CYTHON_TOOLBOX 1) | ||||
|   # Set up cache options | ||||
|   set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython") | ||||
|   if(NOT GTSAM_CYTHON_INSTALL_PATH) | ||||
|     set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython") | ||||
|   endif() | ||||
|   set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency) | ||||
|   add_subdirectory(cython) | ||||
| else() | ||||
|   set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h | ||||
| endif() | ||||
| 
 | ||||
| 
 | ||||
| # Install config and export files | ||||
| GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") | ||||
| export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake) | ||||
| 
 | ||||
| 
 | ||||
| # Check for doxygen availability - optional dependency | ||||
| find_package(Doxygen) | ||||
| 
 | ||||
|  | @ -489,136 +103,11 @@ endif() | |||
| # CMake Tools | ||||
| add_subdirectory(cmake) | ||||
| 
 | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Set up CPack | ||||
| set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM") | ||||
| set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology") | ||||
| set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu") | ||||
| set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md") | ||||
| set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") | ||||
| set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) | ||||
| set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR}) | ||||
| set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH}) | ||||
| set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}") | ||||
| #set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory | ||||
| #set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error | ||||
| set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$") | ||||
| set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/") | ||||
| set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/") | ||||
| set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") | ||||
| #set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs | ||||
| 
 | ||||
| # Deb-package specific cpack | ||||
| set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev") | ||||
| set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)") | ||||
| 
 | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Print configuration variables | ||||
| message(STATUS "===============================================================") | ||||
| message(STATUS "================  Configuration Options  ======================") | ||||
| message(STATUS "  CMAKE_CXX_COMPILER_ID type     : ${CMAKE_CXX_COMPILER_ID}") | ||||
| message(STATUS "  CMAKE_CXX_COMPILER_VERSION     : ${CMAKE_CXX_COMPILER_VERSION}") | ||||
| message(STATUS "  CMake version                  : ${CMAKE_VERSION}") | ||||
| message(STATUS "  CMake generator                : ${CMAKE_GENERATOR}") | ||||
| message(STATUS "  CMake build tool               : ${CMAKE_BUILD_TOOL}") | ||||
| message(STATUS "Build flags                                               ") | ||||
| print_config_flag(${GTSAM_BUILD_TESTS}                 "Build Tests                    ") | ||||
| print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS}       "Build examples with 'make all' ") | ||||
| print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS}         "Build timing scripts with 'make all'") | ||||
| if (DOXYGEN_FOUND) | ||||
|     print_config_flag(${GTSAM_BUILD_DOCS}              "Build Docs                     ") | ||||
| endif() | ||||
| print_config_flag(${BUILD_SHARED_LIBS}                 "Build shared GTSAM libraries   ") | ||||
| print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES}        "Put build type in library name ") | ||||
| if(GTSAM_UNSTABLE_AVAILABLE) | ||||
|     print_config_flag(${GTSAM_BUILD_UNSTABLE}          "Build libgtsam_unstable        ") | ||||
| endif() | ||||
| 
 | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|     print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE}     "Build for native architecture  ") | ||||
|     message(STATUS "  Build type                     : ${CMAKE_BUILD_TYPE}") | ||||
|     message(STATUS "  C compilation flags            : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") | ||||
|     message(STATUS "  C++ compilation flags          : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") | ||||
| endif() | ||||
| 
 | ||||
| print_build_options_for_target(gtsam) | ||||
| 
 | ||||
| message(STATUS "  Use System Eigen               : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") | ||||
| 
 | ||||
| if(GTSAM_USE_TBB) | ||||
| 	message(STATUS "  Use Intel TBB                  : Yes") | ||||
| elseif(TBB_FOUND) | ||||
| 	message(STATUS "  Use Intel TBB                  : TBB found but GTSAM_WITH_TBB is disabled") | ||||
| else() | ||||
| 	message(STATUS "  Use Intel TBB                  : TBB not found") | ||||
| endif() | ||||
| if(GTSAM_USE_EIGEN_MKL) | ||||
| 	message(STATUS "  Eigen will use MKL             : Yes") | ||||
| elseif(MKL_FOUND) | ||||
| 	message(STATUS "  Eigen will use MKL             : MKL found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| else() | ||||
| 	message(STATUS "  Eigen will use MKL             : MKL not found") | ||||
| endif() | ||||
| if(GTSAM_USE_EIGEN_MKL_OPENMP) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : Yes") | ||||
| elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| elseif(OPENMP_FOUND AND NOT MKL_FOUND) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP found but MKL not found") | ||||
| elseif(OPENMP_FOUND) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") | ||||
| else() | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP not found") | ||||
| endif() | ||||
| message(STATUS "  Default allocator              : ${GTSAM_DEFAULT_ALLOCATOR}") | ||||
| 
 | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
| 	if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) | ||||
| 		message(STATUS "  Build with ccache              : Yes") | ||||
| 	elseif(CCACHE_FOUND) | ||||
| 		message(STATUS "  Build with ccache              : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") | ||||
| 	else() | ||||
| 		message(STATUS "  Build with ccache              : No") | ||||
| 	endif() | ||||
| endif() | ||||
| 
 | ||||
| message(STATUS "Packaging flags                                               ") | ||||
| message(STATUS "  CPack Source Generator         : ${CPACK_SOURCE_GENERATOR}") | ||||
| message(STATUS "  CPack Generator                : ${CPACK_GENERATOR}") | ||||
| 
 | ||||
| message(STATUS "GTSAM flags                                               ") | ||||
| print_config_flag(${GTSAM_USE_QUATERNIONS}             "Quaternions as default Rot3     ") | ||||
| print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS}   "Runtime consistency checking    ") | ||||
| print_config_flag(${GTSAM_ROT3_EXPMAP}                 "Rot3 retract is full ExpMap     ") | ||||
| print_config_flag(${GTSAM_POSE3_EXPMAP}                "Pose3 retract is full ExpMap    ") | ||||
| print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4}   "Deprecated in GTSAM 4 allowed   ") | ||||
| print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS}   "Point3 is typedef to Vector3    ") | ||||
| print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION}   "Metis-based Nested Dissection   ") | ||||
| print_config_flag(${GTSAM_TANGENT_PREINTEGRATION}      "Use tangent-space preintegration") | ||||
| print_config_flag(${GTSAM_BUILD_WRAP}                  "Build Wrap                     ") | ||||
| 
 | ||||
| message(STATUS "MATLAB toolbox flags                                      ") | ||||
| print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX}      "Install matlab toolbox         ") | ||||
| 
 | ||||
| message(STATUS "Cython toolbox flags                                      ") | ||||
| print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX}      "Install Cython toolbox         ") | ||||
| if(GTSAM_INSTALL_CYTHON_TOOLBOX) | ||||
| 	message(STATUS "  Python version                 : ${GTSAM_PYTHON_VERSION}") | ||||
| endif() | ||||
| message(STATUS "===============================================================") | ||||
| include(cmake/HandlePrintConfiguration.cmake) | ||||
| 
 | ||||
| # Print warnings at the end | ||||
| if(GTSAM_WITH_TBB AND NOT TBB_FOUND) | ||||
| 	message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled.  Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") | ||||
| endif() | ||||
| if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND) | ||||
| 	message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled.  Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning.  See INSTALL.md for notes on performance.") | ||||
| endif() | ||||
| if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND) | ||||
| 	message(WARNING "Your compiler does not support OpenMP.  Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.") | ||||
| endif() | ||||
| include(cmake/HandleFinalChecks.cmake) | ||||
| 
 | ||||
| # Include CPack *after* all flags | ||||
| include(CPack) | ||||
|  |  | |||
|  | @ -6,12 +6,12 @@ file(GLOB cppunitlite_src "*.cpp") | |||
| add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers}) | ||||
| list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite) | ||||
| set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) | ||||
| target_include_directories(CppUnitLite PUBLIC ${Boost_INCLUDE_DIR}) # boost/lexical_cast.h | ||||
| target_link_libraries(CppUnitLite PUBLIC Boost::boost) # boost/lexical_cast.h | ||||
| 
 | ||||
| gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure | ||||
| 
 | ||||
| option(GTSAM_INSTALL_CPPUNITLITE "Enable/Disable installation of CppUnitLite library" ON) | ||||
| if (GTSAM_INSTALL_CPPUNITLITE) | ||||
|     install(FILES ${cppunitlite_headers} DESTINATION include/CppUnitLite) | ||||
|     install(TARGETS CppUnitLite EXPORT GTSAM-exports ARCHIVE DESTINATION lib) | ||||
|     install(FILES ${cppunitlite_headers} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/CppUnitLite) | ||||
|     install(TARGETS CppUnitLite EXPORT GTSAM-exports ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) | ||||
| endif(GTSAM_INSTALL_CPPUNITLITE) | ||||
|  |  | |||
|  | @ -64,7 +64,7 @@ protected: | |||
|   class testGroup##testName##Test : public Test \ | ||||
|   { public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, true) {} \ | ||||
|             virtual ~testGroup##testName##Test () {};\ | ||||
|             void run (TestResult& result_);} \ | ||||
|             void run (TestResult& result_) override;} \ | ||||
|     testGroup##testName##Instance; \ | ||||
|   void testGroup##testName##Test::run (TestResult& result_) | ||||
| 
 | ||||
|  | @ -82,7 +82,7 @@ protected: | |||
|   class testGroup##testName##Test : public Test \ | ||||
|   { public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, false) {} \ | ||||
|             virtual ~testGroup##testName##Test () {};\ | ||||
|             void run (TestResult& result_);} \ | ||||
|             void run (TestResult& result_) override;} \ | ||||
|     testGroup##testName##Instance; \ | ||||
|   void testGroup##testName##Test::run (TestResult& result_) | ||||
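// Usage sketch (hedged example; CHECK is the usual CppUnitLite assertion macro):
//
//   TEST(Arithmetic, addition)
//   {
//     CHECK(1 + 1 == 2);
//   }
//
// The macro above declares a Test subclass and a static instance of it; the
// braces following the invocation become the body of run().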
| 
 | ||||
|  |  | |||
|  | @ -72,9 +72,9 @@ A Lie group is both a manifold *and* a group. Hence, a LIE_GROUP type should imp | |||
| However, we now also need to be able to evaluate the derivatives of compose and inverse.  | ||||
| Hence, we have the following extra valid static functions defined in the struct `gtsam::traits<T>`: | ||||
| 
 | ||||
| * `r = traits<T>::Compose(p,q,Hq,Hp)` | ||||
| * `r = traits<T>::Compose(p,q,Hp,Hq)` | ||||
| * `q = traits<T>::Inverse(p,Hp)` | ||||
| * `r = traits<T>::Between(p,q,Hq,H2p)` | ||||
| * `r = traits<T>::Between(p,q,Hp,Hq)` | ||||
| 
 | ||||
| where above the *H* arguments stand for optional Jacobian arguments.  | ||||
| That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor). | ||||
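As a concrete illustration (a minimal sketch, assuming a working GTSAM build and picking `Pose2` as the Lie group type), the Jacobian-returning variants are called like this:

```cpp
#include <gtsam/base/Matrix.h>
#include <gtsam/geometry/Pose2.h>

int main() {
  using gtsam::Pose2;
  Pose2 p(1.0, 2.0, 0.3), q(2.0, 0.0, -0.1);

  // Fixed-size matrices bind to the optional Jacobian arguments.
  gtsam::Matrix3 Hp, Hq;
  Pose2 r = gtsam::traits<Pose2>::Compose(p, q, Hp, Hq);  // r = p * q
  Pose2 d = gtsam::traits<Pose2>::Between(p, q, Hp, Hq);  // d = p^{-1} * q

  gtsam::Matrix3 Hinv;
  Pose2 pinv = gtsam::traits<Pose2>::Inverse(p, Hinv);    // pinv = p^{-1}

  r.print("compose: ");
  d.print("between: ");
  pinv.print("inverse: ");
  return 0;
}
```

Omitting the *H* arguments (the default) simply skips the Jacobian computation.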
|  |  | |||
|  | @ -13,7 +13,7 @@ $ make install | |||
| ## Important Installation Notes | ||||
| 
 | ||||
| 1. GTSAM requires the following libraries to be installed on your system: | ||||
|     - BOOST version 1.43 or greater (install through Linux repositories or MacPorts) | ||||
|     - BOOST version 1.58 or greater (install through Linux repositories or MacPorts) | ||||
|     - Cmake version 3.0 or higher | ||||
|     - Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher | ||||
| 
 | ||||
|  | @ -173,7 +173,7 @@ NOTE:  If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against g | |||
| Intel has a guide for installing MKL on Linux through APT repositories at <https://software.intel.com/en-us/articles/installing-intel-free-libs-and-python-apt-repo>. | ||||
| 
 | ||||
| After following the instructions, add the following to your `~/.bashrc` (and afterwards, open a new terminal before compiling GTSAM): | ||||
| `LD_PRELOAD` need only be set if you are building the cython wrapper to use GTSAM from python. | ||||
| `LD_PRELOAD` need only be set if you are building the python wrapper to use GTSAM from python. | ||||
| ```sh | ||||
| source /opt/intel/mkl/bin/mklvars.sh intel64 | ||||
| export LD_PRELOAD="$LD_PRELOAD:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so" | ||||
|  | @ -190,6 +190,6 @@ Failing to specify `LD_PRELOAD` may lead to errors such as: | |||
| `ImportError: /opt/intel/mkl/lib/intel64/libmkl_vml_avx2.so: undefined symbol: mkl_serv_getenv` | ||||
| or | ||||
| `Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.` | ||||
| when importing GTSAM using the cython wrapper in python. | ||||
| when importing GTSAM using the python wrapper. | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
LICENSE (2 changed lines)
							|  | @ -23,3 +23,5 @@ ordering library | |||
|     - Included unmodified in gtsam/3rdparty/metis | ||||
|     - Licenced under Apache License v 2.0, provided in | ||||
|       gtsam/3rdparty/metis/LICENSE.txt | ||||
| - Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK. | ||||
|     - Licenced under MPL2, provided at https://github.com/yixuan/spectra | ||||
|  |  | |||
README.md (43 changed lines)
							|  | @ -1,17 +1,29 @@ | |||
| [](https://travis-ci.com/borglab/gtsam/) | ||||
| # README - Georgia Tech Smoothing and Mapping Library | ||||
| 
 | ||||
| # README - Georgia Tech Smoothing and Mapping library | ||||
| **Important Note** | ||||
| 
 | ||||
| As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features.  | ||||
| 
 | ||||
| However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`. | ||||
| 
 | ||||
| ## What is GTSAM? | ||||
| 
 | ||||
| GTSAM is a library of C++ classes that implement smoothing and | ||||
| mapping (SAM) in robotics and vision, using factor graphs and Bayes | ||||
| networks as the underlying computing paradigm rather than sparse | ||||
| GTSAM is a C++ library that implements smoothing and | ||||
| mapping (SAM) in robotics and vision, using Factor Graphs and Bayes | ||||
| Networks as the underlying computing paradigm rather than sparse | ||||
| matrices. | ||||
| 
 | ||||
| On top of the C++ library, GTSAM includes a MATLAB interface (enable | ||||
| GTSAM_INSTALL_MATLAB_TOOLBOX in CMake to build it). A Python interface | ||||
| is under development. | ||||
| The current support matrix is: | ||||
| 
 | ||||
| | Platform     | Compiler  | Build Status  | | ||||
| |:------------:|:---------:|:-------------:| | ||||
| | Ubuntu 18.04 | gcc/clang |  | | ||||
| | macOS        | clang     |  | | ||||
| | Windows      | MSVC      |  | | ||||
| 
 | ||||
| 
 | ||||
| On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](#wrappers). | ||||
| 
 | ||||
| 
 | ||||
| ## Quickstart | ||||
| 
 | ||||
|  | @ -28,7 +40,7 @@ $ make install | |||
| 
 | ||||
| Prerequisites: | ||||
| 
 | ||||
| - [Boost](http://www.boost.org/users/download/) >= 1.43 (Ubuntu: `sudo apt-get install libboost-all-dev`) | ||||
| - [Boost](http://www.boost.org/users/download/) >= 1.58 (Ubuntu: `sudo apt-get install libboost-all-dev`) | ||||
| - [CMake](http://www.cmake.org/cmake/resources/software.html) >= 3.0 (Ubuntu: `sudo apt-get install cmake`) | ||||
| - A modern compiler, i.e., at least gcc 4.7.3 on Linux. | ||||
| 
 | ||||
|  | @ -41,13 +53,16 @@ Optional prerequisites - used automatically if findable by CMake: | |||
| 
 | ||||
| ## GTSAM 4 Compatibility | ||||
| 
 | ||||
| GTSAM 4 will introduce several new features, most notably Expressions and a python toolbox. We will also deprecate some legacy functionality and wrongly named methods, but by default the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 is enabled, allowing anyone to just pull V4 and compile. To build the python toolbox, however, you will have to explicitly disable that flag. | ||||
| GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect. | ||||
| 
 | ||||
| GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 to use the deprecated methods. | ||||
| 
 | ||||
| GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag GTSAM_ALLOW_DEPRECATED_SINCE_V41 for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile. | ||||
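For example, to verify that downstream code no longer relies on the newly deprecated API, the flag can be turned off at configure time (a sketch of the configure step; adapt paths and job counts to your setup):

```sh
cmake -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF ..
make -j4 && make check   # 'make check' builds and runs the unit tests
```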
| 
 | ||||
| Also, GTSAM 4 introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we will also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 will be deprecated, so please be aware that this might render functions using their default constructor incorrect. | ||||
| 
 | ||||
| ## Wrappers | ||||
| 
 | ||||
| We provide support for [MATLAB](matlab/README.md) and [Python](cython/README.md) wrappers for GTSAM. Please refer to the linked documents for more details. | ||||
| We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md) wrappers for GTSAM. Please refer to the linked documents for more details. | ||||
| 
 | ||||
| ## The Preintegrated IMU Factor | ||||
| 
 | ||||
|  | @ -62,7 +77,7 @@ Our implementation improves on this using integration on the manifold, as detail | |||
| 
 | ||||
| If you are using the factor in academic work, please cite the publications above. | ||||
| 
 | ||||
| In GTSAM 4 a new and more efficient implementation, based on integrating on the NavState tangent space and detailed in docs/ImuFactor.pdf, is enabled by default. To switch to the RSS 2015 version, set the flag **GTSAM_TANGENT_PREINTEGRATION** to OFF. | ||||
| In GTSAM 4 a new and more efficient implementation, based on integrating on the NavState tangent space and detailed in [this document](doc/ImuFactor.pdf), is enabled by default. To switch to the RSS 2015 version, set the flag **GTSAM_TANGENT_PREINTEGRATION** to OFF. | ||||
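For example (again just a sketch of the configure invocation):

```sh
cmake -DGTSAM_TANGENT_PREINTEGRATION=OFF ..
```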
| 
 | ||||
| 
 | ||||
| ## Additional Information | ||||
|  | @ -79,4 +94,4 @@ GTSAM is open source under the BSD license, see the [`LICENSE`](LICENSE) and [`L | |||
| 
 | ||||
| Please see the [`examples/`](examples) directory and the [`USAGE`](USAGE.md) file for examples on how to use GTSAM. | ||||
| 
 | ||||
| GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS). | ||||
| GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS.md). | ||||
|  |  | |||
|  | @ -17,10 +17,8 @@ install(FILES | |||
|   GtsamBuildTypes.cmake | ||||
|   GtsamMakeConfigFile.cmake | ||||
|   GtsamMatlabWrap.cmake | ||||
|   GtsamPythonWrap.cmake | ||||
|   GtsamCythonWrap.cmake | ||||
|   GtsamTesting.cmake | ||||
|   FindCython.cmake | ||||
|   GtsamPrinting.cmake | ||||
|   FindNumPy.cmake | ||||
|   README.html | ||||
|   DESTINATION "${SCRIPT_INSTALL_DIR}/GTSAMCMakeTools") | ||||
|  |  | |||
(File diff suppressed because it is too large)
							|  | @ -1,81 +0,0 @@ | |||
| # Modifed from: https://github.com/nest/nest-simulator/blob/master/cmake/FindCython.cmake | ||||
| # | ||||
| # Find the Cython compiler. | ||||
| # | ||||
| # This code sets the following variables: | ||||
| # | ||||
| #  CYTHON_FOUND | ||||
| #  CYTHON_PATH | ||||
| #  CYTHON_EXECUTABLE | ||||
| #  CYTHON_VERSION | ||||
| # | ||||
| # See also UseCython.cmake | ||||
| 
 | ||||
| #============================================================================= | ||||
| # Copyright 2011 Kitware, Inc. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| #============================================================================= | ||||
| 
 | ||||
| # Use the Cython executable that lives next to the Python executable | ||||
| # if it is a local installation. | ||||
| if(GTSAM_PYTHON_VERSION STREQUAL "Default") | ||||
|   find_package(PythonInterp) | ||||
| else() | ||||
|   find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT) | ||||
| endif() | ||||
| 
 | ||||
| if ( PYTHONINTERP_FOUND ) | ||||
|   execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c" | ||||
|       "import Cython; print(Cython.__path__[0])" | ||||
|       RESULT_VARIABLE RESULT | ||||
|       OUTPUT_VARIABLE CYTHON_PATH | ||||
|       OUTPUT_STRIP_TRAILING_WHITESPACE | ||||
|   ) | ||||
| endif () | ||||
| 
 | ||||
| # RESULT=0 means ok | ||||
| if ( NOT RESULT ) | ||||
|   get_filename_component( _python_path ${PYTHON_EXECUTABLE} PATH ) | ||||
|   find_program( CYTHON_EXECUTABLE | ||||
|       NAMES cython cython.bat cython3 | ||||
|       HINTS ${_python_path} | ||||
|    ) | ||||
| endif () | ||||
| 
 | ||||
| # RESULT=0 means ok | ||||
| if ( NOT RESULT ) | ||||
|   execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c" | ||||
|       "import Cython; print(Cython.__version__)" | ||||
|       RESULT_VARIABLE RESULT | ||||
|       OUTPUT_VARIABLE CYTHON_VAR_OUTPUT | ||||
|       ERROR_VARIABLE CYTHON_VAR_OUTPUT | ||||
|       OUTPUT_STRIP_TRAILING_WHITESPACE | ||||
|   ) | ||||
|   if ( RESULT EQUAL 0 ) | ||||
|     string( REGEX REPLACE ".* ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1" | ||||
|                           CYTHON_VERSION "${CYTHON_VAR_OUTPUT}" ) | ||||
|   endif () | ||||
| endif () | ||||
| 
 | ||||
| include( FindPackageHandleStandardArgs ) | ||||
| find_package_handle_standard_args( Cython | ||||
|   FOUND_VAR | ||||
|     CYTHON_FOUND | ||||
|   REQUIRED_VARS | ||||
|     CYTHON_PATH | ||||
|     CYTHON_EXECUTABLE | ||||
|   VERSION_VAR | ||||
|     CYTHON_VERSION | ||||
|     ) | ||||
| 
 | ||||
|  | @ -115,7 +115,7 @@ IF(WIN32 AND MKL_ROOT_DIR) | |||
|         IF (MKL_INCLUDE_DIR MATCHES "10.3") | ||||
|                 SET(MKL_LIBS ${MKL_LIBS} libiomp5md) | ||||
|         ENDIF() | ||||
|          | ||||
| 
 | ||||
|         FOREACH (LIB ${MKL_LIBS}) | ||||
|                 FIND_LIBRARY(${LIB}_PATH ${LIB} PATHS ${MKL_LIB_SEARCHPATH} ENV LIBRARY_PATH) | ||||
|                 IF(${LIB}_PATH) | ||||
|  | @ -147,7 +147,7 @@ ELSEIF(MKL_ROOT_DIR) # UNIX and macOS | |||
|                 ${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR} | ||||
|                 ${MKL_ROOT_DIR}/lib/ | ||||
|         ) | ||||
|          | ||||
| 
 | ||||
|         # MKL on Mac OS doesn't ship with GNU thread versions, only Intel versions (see above) | ||||
|         IF(NOT APPLE) | ||||
|             FIND_LIBRARY(MKL_GNUTHREAD_LIBRARY | ||||
|  | @ -231,6 +231,7 @@ ELSEIF(MKL_ROOT_DIR) # UNIX and macOS | |||
|                         FIND_LIBRARY(MKL_IOMP5_LIBRARY | ||||
|                           iomp5 | ||||
|                           PATHS | ||||
|                                 ${MKL_ROOT_DIR}/lib/intel64 | ||||
|                                 ${MKL_ROOT_DIR}/../lib/intel64 | ||||
|                         ) | ||||
|                 ELSE() | ||||
|  | @ -254,7 +255,7 @@ ELSEIF(MKL_ROOT_DIR) # UNIX and macOS | |||
|         ELSE() | ||||
|             SET(MKL_LIBRARIES ${MKL_LP_GNUTHREAD_LIBRARIES}) | ||||
|         ENDIF() | ||||
|          | ||||
| 
 | ||||
|         MARK_AS_ADVANCED(MKL_CORE_LIBRARY MKL_LP_LIBRARY MKL_ILP_LIBRARY | ||||
|                 MKL_SEQUENTIAL_LIBRARY MKL_INTELTHREAD_LIBRARY MKL_GNUTHREAD_LIBRARY) | ||||
| ENDIF() | ||||
|  | @ -266,4 +267,4 @@ find_package_handle_standard_args(MKL DEFAULT_MSG MKL_INCLUDE_DIR MKL_LIBRARIES) | |||
| #        LINK_DIRECTORIES(${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}) # hack | ||||
| #endif() | ||||
| 
 | ||||
| MARK_AS_ADVANCED(MKL_INCLUDE_DIR MKL_LIBRARIES) | ||||
| MARK_AS_ADVANCED(MKL_INCLUDE_DIR MKL_LIBRARIES) | ||||
|  |  | |||
|  | @ -40,17 +40,9 @@ | |||
| 
 | ||||
| # Finding NumPy involves calling the Python interpreter | ||||
| if(NumPy_FIND_REQUIRED) | ||||
|   if(GTSAM_PYTHON_VERSION STREQUAL "Default") | ||||
|     find_package(PythonInterp REQUIRED) | ||||
|   else() | ||||
|       find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) | ||||
|   endif() | ||||
|   find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) | ||||
| else() | ||||
|   if(GTSAM_PYTHON_VERSION STREQUAL "Default") | ||||
|     find_package(PythonInterp) | ||||
|   else() | ||||
|     find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT) | ||||
|   endif() | ||||
|   find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT) | ||||
| endif() | ||||
| 
 | ||||
| if(NOT PYTHONINTERP_FOUND) | ||||
|  |  | |||
|  | @ -1,3 +1,10 @@ | |||
| include(CheckCXXCompilerFlag) # for check_cxx_compiler_flag() | ||||
| 
 | ||||
| # Set cmake policy to recognize the AppleClang compiler | ||||
| # independently from the Clang compiler. | ||||
| if(POLICY CMP0025) | ||||
|   cmake_policy(SET CMP0025 NEW) | ||||
| endif() | ||||
| 
 | ||||
| # function:  list_append_cache(var [new_values ...]) | ||||
| # Like "list(APPEND ...)" but working for CACHE variables. | ||||
|  | @ -81,6 +88,11 @@ if(MSVC) | |||
|     WINDOWS_LEAN_AND_MEAN | ||||
|     NOMINMAX | ||||
| 	) | ||||
|   # Avoid literally hundreds to thousands of warnings: | ||||
|   list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC | ||||
| 	/wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data | ||||
|   ) | ||||
| 
 | ||||
| endif() | ||||
| 
 | ||||
| # Other (non-preprocessor macros) compiler flags: | ||||
|  | @ -94,7 +106,27 @@ if(MSVC) | |||
|   set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING          /MD /O2  CACHE STRING "(User editable) Private compiler flags for Timing configuration.") | ||||
| else() | ||||
|   # Common to all configurations, next for each configuration: | ||||
|   set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON          -Wall CACHE STRING "(User editable) Private compiler flags for all configurations.") | ||||
| 
 | ||||
|   if (NOT MSVC) | ||||
|     check_cxx_compiler_flag(-Wsuggest-override COMPILER_HAS_WSUGGEST_OVERRIDE) | ||||
|     check_cxx_compiler_flag(-Wmissing-override COMPILER_HAS_WMISSING_OVERRIDE) | ||||
|     if (COMPILER_HAS_WSUGGEST_OVERRIDE) | ||||
|       set(flag_override_ -Wsuggest-override) # -Werror=suggest-override: Add again someday | ||||
|     elseif(COMPILER_HAS_WMISSING_OVERRIDE) | ||||
|       set(flag_override_ -Wmissing-override) # -Werror=missing-override: Add again someday | ||||
|     endif() | ||||
|   endif() | ||||
| 
 | ||||
|   set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON | ||||
|     -Wall                                          # Enable common warnings | ||||
|     -fPIC                                          # ensure proper code generation for shared libraries | ||||
|     $<$<CXX_COMPILER_ID:GNU>:-Wreturn-local-addr -Werror=return-local-addr>            # Error: return local address | ||||
|     $<$<CXX_COMPILER_ID:Clang>:-Wreturn-stack-address   -Werror=return-stack-address>  # Error: return local address | ||||
|     -Wreturn-type  -Werror=return-type             # Error on missing return() | ||||
|     -Wformat -Werror=format-security               # Error on wrong printf() arguments | ||||
|     $<$<COMPILE_LANGUAGE:CXX>:${flag_override_}>   # Enforce the use of the override keyword | ||||
|     # | ||||
|     CACHE STRING "(User editable) Private compiler flags for all configurations.") | ||||
|   set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG           -g -fno-inline  CACHE STRING "(User editable) Private compiler flags for Debug configuration.") | ||||
|   set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO  -g -O3  CACHE STRING "(User editable) Private compiler flags for RelWithDebInfo configuration.") | ||||
|   set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE         -O3  CACHE STRING "(User editable) Private compiler flags for Release configuration.") | ||||
|  | @ -236,3 +268,17 @@ function(gtsam_apply_build_flags target_name_) | |||
|   target_compile_options(${target_name_} PRIVATE ${GTSAM_COMPILE_OPTIONS_PRIVATE}) | ||||
| 
 | ||||
| endfunction(gtsam_apply_build_flags) | ||||
| 
 | ||||
| 
 | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|   # Set the build type to upper case for downstream use | ||||
|   string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER) | ||||
| 
 | ||||
|   # Set the GTSAM_BUILD_TAG variable. | ||||
|   # If build type is Release, set to blank (""), else set to the build type. | ||||
|   if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE") | ||||
|    set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory | ||||
|   else() | ||||
|    set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}") | ||||
|   endif() | ||||
| endif() | ||||
|  |  | |||
|  | @ -1,268 +0,0 @@ | |||
| # Check Cython version, need to be >=0.25.2 | ||||
| # Unset these cached variables to avoid surprises when the python/cython | ||||
| # in the current environment are different from the cached! | ||||
| unset(PYTHON_EXECUTABLE CACHE) | ||||
| unset(CYTHON_EXECUTABLE CACHE) | ||||
| unset(PYTHON_INCLUDE_DIR CACHE) | ||||
| unset(PYTHON_MAJOR_VERSION CACHE) | ||||
| 
 | ||||
| if(GTSAM_PYTHON_VERSION STREQUAL "Default") | ||||
|   find_package(PythonInterp REQUIRED) | ||||
|   find_package(PythonLibs REQUIRED) | ||||
| else() | ||||
|   find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) | ||||
|   find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) | ||||
| endif() | ||||
| find_package(Cython 0.25.2 REQUIRED) | ||||
| 
 | ||||
| execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" | ||||
|     "from __future__ import print_function;import sys;print(sys.version[0], end='')" | ||||
|     OUTPUT_VARIABLE PYTHON_MAJOR_VERSION | ||||
| ) | ||||
| 
 | ||||
| # User-friendly Cython wrapping and installing function. | ||||
| # Builds a Cython module from the provided interface_header. | ||||
| # For example, for the interface header gtsam.h, | ||||
| # this will build the wrap module 'gtsam'. | ||||
| # | ||||
| # Arguments: | ||||
| # | ||||
| # interface_header:  The relative path to the wrapper interface definition file. | ||||
| # extra_imports: extra header to import in the Cython pxd file. | ||||
| #                For example, to use Cython gtsam.pxd in your own module, | ||||
| #        use "from gtsam cimport *" | ||||
| # install_path: destination to install the library | ||||
| # libs: libraries to link with | ||||
| # dependencies: Dependencies which need to be built before the wrapper | ||||
| function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies) | ||||
|   # Paths for generated files | ||||
|   get_filename_component(module_name "${interface_header}" NAME_WE) | ||||
|   set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}") | ||||
|   wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}") | ||||
|   install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}") | ||||
| endfunction() | ||||
| 
 | ||||
| function(set_up_required_cython_packages) | ||||
|   # Set up building of cython module | ||||
|   include_directories(${PYTHON_INCLUDE_DIRS}) | ||||
|   find_package(NumPy REQUIRED) | ||||
|   include_directories(${NUMPY_INCLUDE_DIRS}) | ||||
| endfunction() | ||||
| 
 | ||||
| 
 | ||||
| # Convert pyx to cpp by executing cython | ||||
| # This is the first step to compile cython from the command line | ||||
| # as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html | ||||
| # | ||||
| # Arguments: | ||||
| #    - target:  The specified target for this step | ||||
| #    - pyx_file:   The input pyx_file in full *absolute* path | ||||
| #    - generated_cpp:   The output cpp file in full absolute path | ||||
| #    - include_dirs:   Directories to include when executing cython | ||||
| function(pyx_to_cpp target pyx_file generated_cpp include_dirs) | ||||
|   foreach(dir ${include_dirs}) | ||||
|     set(includes_for_cython ${includes_for_cython}  -I ${dir}) | ||||
|   endforeach() | ||||
| 
 | ||||
|   add_custom_command( | ||||
|     OUTPUT ${generated_cpp} | ||||
|     COMMAND | ||||
|     ${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp} | ||||
|     VERBATIM) | ||||
|   add_custom_target(${target} ALL DEPENDS ${generated_cpp}) | ||||
| endfunction() | ||||
| 
 | ||||
| # Build the cpp file generated by converting pyx using cython | ||||
| # This is the second step to compile cython from the command line | ||||
| # as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html | ||||
| # | ||||
| # Arguments: | ||||
| #    - target:  The specified target for this step | ||||
| #    - cpp_file:   The input cpp_file in full *absolute* path | ||||
| #    - output_lib_we:   The output lib filename only (without extension) | ||||
| #    - output_dir:   The output directory | ||||
| function(build_cythonized_cpp target cpp_file output_lib_we output_dir) | ||||
|   add_library(${target} MODULE ${cpp_file}) | ||||
|   if(APPLE) | ||||
|     set(link_flags "-undefined dynamic_lookup") | ||||
|   endif() | ||||
|   set_target_properties(${target} | ||||
|       PROPERTIES COMPILE_FLAGS "-w" | ||||
|       LINK_FLAGS "${link_flags}" | ||||
|       OUTPUT_NAME ${output_lib_we} | ||||
|       PREFIX "" | ||||
|       ${CMAKE_BUILD_TYPE_UPPER}_POSTFIX "" | ||||
|       LIBRARY_OUTPUT_DIRECTORY ${output_dir}) | ||||
| endfunction() | ||||
| 
 | ||||
| # Cythonize a pyx from the command line as described at | ||||
| # http://cython.readthedocs.io/en/latest/src/reference/compilation.html | ||||
| # Arguments: | ||||
| #    - target:        The specified target | ||||
| #    - pyx_file:      The input pyx_file in full *absolute* path | ||||
| #    - output_lib_we: The output lib filename only (without extension) | ||||
| #    - output_dir:    The output directory | ||||
| #    - include_dirs:  Directories to include when executing cython | ||||
| #    - libs:          Libraries to link with | ||||
| #    - interface_header: For dependency. Any update in interface header will re-trigger cythonize | ||||
| function(cythonize target pyx_file output_lib_we output_dir include_dirs libs interface_header dependencies) | ||||
|   get_filename_component(pyx_path "${pyx_file}" DIRECTORY) | ||||
|   get_filename_component(pyx_name "${pyx_file}" NAME_WE) | ||||
|   set(generated_cpp "${output_dir}/${pyx_name}.cpp") | ||||
| 
 | ||||
|   set_up_required_cython_packages() | ||||
|   pyx_to_cpp(${target}_pyx2cpp ${pyx_file} ${generated_cpp} "${include_dirs}") | ||||
| 
 | ||||
|   # Late dependency injection, to make sure this gets called whenever the interface header is updated | ||||
|   # See: https://stackoverflow.com/questions/40032593/cmake-does-not-rebuild-dependent-after-prerequisite-changes | ||||
|   add_custom_command(OUTPUT ${generated_cpp} DEPENDS ${interface_header} ${pyx_file} APPEND) | ||||
|   if (NOT "${dependencies}" STREQUAL "") | ||||
|     add_dependencies(${target}_pyx2cpp "${dependencies}") | ||||
|   endif() | ||||
| 
 | ||||
|   build_cythonized_cpp(${target} ${generated_cpp} ${output_lib_we} ${output_dir}) | ||||
|   if (NOT "${libs}" STREQUAL "") | ||||
|     target_link_libraries(${target} "${libs}") | ||||
|   endif() | ||||
|   add_dependencies(${target} ${target}_pyx2cpp) | ||||
| endfunction() | ||||
| 
 | ||||
| # Internal function that wraps a library and compiles the wrapper | ||||
| function(wrap_library_cython interface_header generated_files_path extra_imports libs dependencies) | ||||
|   # Wrap codegen interface | ||||
|   # Extract module path and name from interface header file name | ||||
|   # wrap requires interfacePath to be *absolute* | ||||
|   get_filename_component(interface_header "${interface_header}" ABSOLUTE) | ||||
|   get_filename_component(module_path "${interface_header}" PATH) | ||||
|   get_filename_component(module_name "${interface_header}" NAME_WE) | ||||
| 
 | ||||
|   # Wrap module to Cython pyx | ||||
|   message(STATUS "Cython wrapper generating ${module_name}.pyx") | ||||
|   set(generated_pyx "${generated_files_path}/${module_name}.pyx") | ||||
|   file(MAKE_DIRECTORY "${generated_files_path}") | ||||
|   add_custom_command( | ||||
|     OUTPUT ${generated_pyx} | ||||
|     DEPENDS ${interface_header} wrap | ||||
|     COMMAND | ||||
|         wrap --cython ${module_path} ${module_name} ${generated_files_path} "${extra_imports}" | ||||
|     VERBATIM | ||||
|     WORKING_DIRECTORY ${generated_files_path}/../) | ||||
|   add_custom_target(cython_wrap_${module_name}_pyx ALL DEPENDS ${generated_pyx}) | ||||
|   if(NOT "${dependencies}" STREQUAL "") | ||||
|     add_dependencies(cython_wrap_${module_name}_pyx ${dependencies}) | ||||
|   endif() | ||||
| 
 | ||||
|   message(STATUS "Cythonize and build ${module_name}.pyx") | ||||
|   get_property(include_dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES) | ||||
|   cythonize(cythonize_${module_name} ${generated_pyx} ${module_name} | ||||
|     ${generated_files_path} "${include_dirs}" "${libs}" ${interface_header} cython_wrap_${module_name}_pyx) | ||||
| 
 | ||||
|   # distclean | ||||
|   add_custom_target(wrap_${module_name}_cython_distclean | ||||
|       COMMAND cmake -E remove_directory ${generated_files_path}) | ||||
| endfunction() | ||||
| 
 | ||||
| # Internal function that installs a wrap toolbox | ||||
| function(install_cython_wrapped_library interface_header generated_files_path install_path) | ||||
|   get_filename_component(module_name "${interface_header}" NAME_WE) | ||||
| 
 | ||||
|   # NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name | ||||
|   # here prevents creating the top-level module name directory in the destination. | ||||
|   # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH/subdirectory if there is one | ||||
|   get_filename_component(location "${install_path}" PATH) | ||||
|   get_filename_component(name "${install_path}" NAME) | ||||
|   message(STATUS "Installing Cython Toolbox to ${location}${GTSAM_BUILD_TAG}/${name}") #${GTSAM_CYTHON_INSTALL_PATH}" | ||||
| 
 | ||||
|   if(GTSAM_BUILD_TYPE_POSTFIXES) | ||||
|     foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) | ||||
|       string(TOUPPER "${build_type}" build_type_upper) | ||||
|       if(${build_type_upper} STREQUAL "RELEASE") | ||||
|         set(build_type_tag "") # Don't create release mode tag on installed directory | ||||
|       else() | ||||
|         set(build_type_tag "${build_type}") | ||||
|       endif() | ||||
| 
 | ||||
|       install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}${build_type_tag}/${name}" | ||||
|           CONFIGURATIONS "${build_type}" | ||||
|           PATTERN "build" EXCLUDE | ||||
|           PATTERN "CMakeFiles" EXCLUDE | ||||
|           PATTERN "Makefile" EXCLUDE | ||||
|           PATTERN "*.cmake" EXCLUDE | ||||
|           PATTERN "*.cpp" EXCLUDE | ||||
|           PATTERN "*.py" EXCLUDE) | ||||
|     endforeach() | ||||
|   else() | ||||
|     install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path} | ||||
|         PATTERN "build" EXCLUDE | ||||
|         PATTERN "CMakeFiles" EXCLUDE | ||||
|         PATTERN "Makefile" EXCLUDE | ||||
|         PATTERN "*.cmake" EXCLUDE | ||||
|         PATTERN "*.cpp" EXCLUDE | ||||
|         PATTERN "*.py" EXCLUDE) | ||||
|   endif() | ||||
| endfunction() | ||||
| 
 | ||||
| # Helper function to install Cython scripts and handle multiple build types where the scripts | ||||
| # should be installed to all build type toolboxes | ||||
| # | ||||
| # Arguments: | ||||
| #  source_directory: The source directory to be installed. "The last component of each directory | ||||
| #                    name is appended to the destination directory but a trailing slash may be | ||||
| #                    used to avoid this because it leaves the last component empty." | ||||
| #                    (https://cmake.org/cmake/help/v3.3/command/install.html?highlight=install#installing-directories) | ||||
| #  dest_directory: The destination directory to install to. | ||||
| #  patterns: list of file patterns to install | ||||
| function(install_cython_scripts source_directory dest_directory patterns) | ||||
|   set(patterns_args "") | ||||
|   set(exclude_patterns "") | ||||
| 
 | ||||
|   foreach(pattern ${patterns}) | ||||
|     list(APPEND patterns_args PATTERN "${pattern}") | ||||
|   endforeach() | ||||
|   if(GTSAM_BUILD_TYPE_POSTFIXES) | ||||
|     foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) | ||||
|       string(TOUPPER "${build_type}" build_type_upper) | ||||
|       if(${build_type_upper} STREQUAL "RELEASE") | ||||
|         set(build_type_tag "") # Don't create release mode tag on installed directory | ||||
|       else() | ||||
|         set(build_type_tag "${build_type}") | ||||
|       endif() | ||||
|       # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one | ||||
|       get_filename_component(location "${dest_directory}" PATH) | ||||
|       get_filename_component(name "${dest_directory}" NAME) | ||||
|       install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}" | ||||
|             FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE) | ||||
|     endforeach() | ||||
|   else() | ||||
|     install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE) | ||||
|   endif() | ||||
| 
 | ||||
| endfunction() | ||||
| 
 | ||||
| # Helper function to install specific files and handle multiple build types where the scripts | ||||
| # should be installed to all build type toolboxes | ||||
| # | ||||
| # Arguments: | ||||
| #  source_files: The source files to be installed. | ||||
| #  dest_directory: The destination directory to install to. | ||||
| function(install_cython_files source_files dest_directory) | ||||
| 
 | ||||
|   if(GTSAM_BUILD_TYPE_POSTFIXES) | ||||
|     foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) | ||||
|       string(TOUPPER "${build_type}" build_type_upper) | ||||
|       if(${build_type_upper} STREQUAL "RELEASE") | ||||
|         set(build_type_tag "") # Don't create release mode tag on installed directory | ||||
|       else() | ||||
|         set(build_type_tag "${build_type}") | ||||
|       endif() | ||||
|       # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one | ||||
|       get_filename_component(location "${dest_directory}" PATH) | ||||
|       get_filename_component(name "${dest_directory}" NAME) | ||||
|       install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}") | ||||
|     endforeach() | ||||
|   else() | ||||
|     install(FILES "${source_files}" DESTINATION "${dest_directory}") | ||||
|   endif() | ||||
| 
 | ||||
| endfunction() | ||||
| 
 | ||||
|  | @ -15,7 +15,7 @@ function(GtsamMakeConfigFile PACKAGE_NAME) | |||
| 		get_filename_component(name "${ARGV1}" NAME_WE) | ||||
| 		set(EXTRA_FILE "${name}.cmake") | ||||
| 		configure_file(${ARGV1} "${PROJECT_BINARY_DIR}/${EXTRA_FILE}" @ONLY) | ||||
| 		install(FILES "${PROJECT_BINARY_DIR}/${EXTRA_FILE}" DESTINATION "${CMAKE_INSTALL_PREFIX}/${DEF_INSTALL_CMAKE_DIR}") | ||||
| 		install(FILES "${PROJECT_BINARY_DIR}/${EXTRA_FILE}" DESTINATION "${DEF_INSTALL_CMAKE_DIR}") | ||||
| 	else() | ||||
| 		set(EXTRA_FILE "_does_not_exist_") | ||||
| 	endif() | ||||
|  |  | |||
|  | @ -1,46 +1,64 @@ | |||
| # Check / set dependent variables for MATLAB wrapper | ||||
| if(GTSAM_INSTALL_MATLAB_TOOLBOX) | ||||
|     find_package(Matlab COMPONENTS MEX_COMPILER REQUIRED) | ||||
|     if(NOT Matlab_MEX_COMPILER) | ||||
|         message(FATAL_ERROR "Cannot find MEX compiler binary. Please check your Matlab installation and ensure MEX in installed as well.") | ||||
|     endif() | ||||
| 
 | ||||
|     if(GTSAM_BUILD_TYPE_POSTFIXES) | ||||
|         set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) | ||||
|     endif() | ||||
| 
 | ||||
|     if(NOT BUILD_SHARED_LIBS) | ||||
|         message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries.  If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") | ||||
|     endif() | ||||
| endif() | ||||
| 
 | ||||
| # Set up cache options | ||||
| option(GTSAM_MEX_BUILD_STATIC_MODULE "Build MATLAB wrapper statically (increases build time)" OFF) | ||||
| set(GTSAM_BUILD_MEX_BINARY_FLAGS "" CACHE STRING "Extra flags for running Matlab MEX compilation") | ||||
| set(GTSAM_TOOLBOX_INSTALL_PATH "" CACHE PATH "Matlab toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/gtsam_toolbox") | ||||
| if(NOT GTSAM_TOOLBOX_INSTALL_PATH) | ||||
| 	set(GTSAM_TOOLBOX_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/gtsam_toolbox") | ||||
|     set(GTSAM_TOOLBOX_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/gtsam_toolbox") | ||||
| endif() | ||||
| 
 | ||||
| # GTSAM_MEX_BUILD_STATIC_MODULE is not for Windows - on Windows any static | ||||
| # libraries are already compiled into the mex module by the linker | ||||
| if(GTSAM_MEX_BUILD_STATIC_MODULE AND WIN32) | ||||
| 	message(FATAL_ERROR "GTSAM_MEX_BUILD_STATIC_MODULE should not be set on Windows - the linker already automatically compiles in any dependent static libraries.  To create a standalone toolbox pacakge, simply ensure that CMake finds the static versions of all dependent libraries (Boost, etc).") | ||||
|     message(FATAL_ERROR "GTSAM_MEX_BUILD_STATIC_MODULE should not be set on Windows - the linker already automatically compiles in any dependent static libraries. To create a standalone toolbox pacakge, simply ensure that CMake finds the static versions of all dependent libraries (Boost, etc).") | ||||
| endif() | ||||
| 
 | ||||
| # Try to automatically configure mex path | ||||
| if(APPLE) | ||||
| 	file(GLOB matlab_bin_directories "/Applications/MATLAB*/bin") | ||||
| 	set(mex_program_name "mex") | ||||
| elseif(WIN32) | ||||
| 	file(GLOB matlab_bin_directories "C:/Program Files*/MATLAB/*/bin") | ||||
| 	set(mex_program_name "mex.bat") | ||||
| else() | ||||
| 	file(GLOB matlab_bin_directories "/usr/local/MATLAB/*/bin") | ||||
| 	set(mex_program_name "mex") | ||||
| set(MEX_COMMAND ${Matlab_MEX_COMPILER} CACHE PATH "Path to MATLAB MEX compiler") | ||||
| set(MATLAB_ROOT ${Matlab_ROOT_DIR} CACHE PATH "Path to MATLAB installation root (e.g. /usr/local/MATLAB/R2012a)") | ||||
| 
 | ||||
| # Try to automatically configure mex path from provided custom `bin` path. | ||||
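| # As an illustration only, a hypothetical invocation (the MATLAB path below is an | ||||
| # example; substitute your own installation's bin directory): | ||||
| #   cmake -DGTSAM_CUSTOM_MATLAB_PATH=/usr/local/MATLAB/R2019b/bin .. | ||||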
| if(GTSAM_CUSTOM_MATLAB_PATH) | ||||
|     set(matlab_bin_directory ${GTSAM_CUSTOM_MATLAB_PATH}) | ||||
| 
 | ||||
|     if(WIN32) | ||||
|         set(mex_program_name "mex.bat") | ||||
|     else() | ||||
|         set(mex_program_name "mex") | ||||
|     endif() | ||||
| 
 | ||||
|     # Run find_program explicitly putting $PATH after our predefined program | ||||
|     # directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents | ||||
|     # finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is | ||||
|     # on the system path. | ||||
|     find_program(MEX_COMMAND ${mex_program_name} | ||||
|        PATHS ${matlab_bin_directory} ENV PATH | ||||
|        NO_DEFAULT_PATH) | ||||
| 
 | ||||
|     mark_as_advanced(FORCE MEX_COMMAND) | ||||
|     # Now that we have mex, trace back to find the Matlab installation root | ||||
|     get_filename_component(MEX_COMMAND "${MEX_COMMAND}" REALPATH) | ||||
|     get_filename_component(mex_path "${MEX_COMMAND}" PATH) | ||||
|     if(mex_path MATCHES ".*/win64$") | ||||
|        get_filename_component(MATLAB_ROOT "${mex_path}/../.." ABSOLUTE) | ||||
|     else() | ||||
|        get_filename_component(MATLAB_ROOT "${mex_path}/.." ABSOLUTE) | ||||
|     endif() | ||||
| endif() | ||||
| # Run find_program explicitly putting $PATH after our predefined program | ||||
| # directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents | ||||
| # finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is | ||||
| # on the system path. | ||||
| list(REVERSE matlab_bin_directories) # Reverse list so the highest version (sorted alphabetically) is preferred | ||||
| find_program(MEX_COMMAND ${mex_program_name} | ||||
| 	PATHS ${matlab_bin_directories} ENV PATH | ||||
| 	NO_DEFAULT_PATH) | ||||
| mark_as_advanced(FORCE MEX_COMMAND) | ||||
| # Now that we have mex, trace back to find the Matlab installation root | ||||
| get_filename_component(MEX_COMMAND "${MEX_COMMAND}" REALPATH) | ||||
| get_filename_component(mex_path "${MEX_COMMAND}" PATH) | ||||
| if(mex_path MATCHES ".*/win64$") | ||||
| 	get_filename_component(MATLAB_ROOT "${mex_path}/../.." ABSOLUTE) | ||||
| else() | ||||
| 	get_filename_component(MATLAB_ROOT "${mex_path}/.." ABSOLUTE) | ||||
| endif() | ||||
| set(MATLAB_ROOT "${MATLAB_ROOT}" CACHE PATH "Path to MATLAB installation root (e.g. /usr/local/MATLAB/R2012a)") | ||||
| 
 | ||||
| 
 | ||||
| # User-friendly wrapping function.  Builds a mex module from the provided | ||||
|  | @ -209,15 +227,30 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex | |||
| 
 | ||||
| 	# Set up generation of module source file | ||||
| 	file(MAKE_DIRECTORY "${generated_files_path}") | ||||
| 
 | ||||
|     find_package(PythonInterp | ||||
|             ${GTSAM_PYTHON_VERSION} | ||||
|             EXACT | ||||
|             REQUIRED) | ||||
|     find_package(PythonLibs | ||||
|             ${GTSAM_PYTHON_VERSION} | ||||
|             EXACT | ||||
|             REQUIRED) | ||||
| 
 | ||||
| 
 | ||||
| 	set(_ignore gtsam::Point2 | ||||
| 			gtsam::Point3) | ||||
| 	add_custom_command( | ||||
| 		OUTPUT ${generated_cpp_file} | ||||
| 		DEPENDS ${interfaceHeader} wrap ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} | ||||
|         COMMAND  | ||||
|             wrap --matlab | ||||
|             ${modulePath} | ||||
|             ${moduleName}  | ||||
|             ${generated_files_path}  | ||||
|             ${matlab_h_path}  | ||||
| 		DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} | ||||
|         COMMAND | ||||
| 			${PYTHON_EXECUTABLE} | ||||
| 			${CMAKE_SOURCE_DIR}/wrap/matlab_wrapper.py | ||||
|             --src ${interfaceHeader} | ||||
| 			--module_name ${moduleName} | ||||
|             --out ${generated_files_path} | ||||
| 			--top_module_namespaces ${moduleName} | ||||
| 			--ignore ${_ignore} | ||||
| 		VERBATIM | ||||
| 		WORKING_DIRECTORY ${generated_files_path}) | ||||
| 		 | ||||
|  |  | |||
|  | @ -1,14 +1,3 @@ | |||
| # print configuration variables | ||||
| # Usage: | ||||
| #print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests                ") | ||||
| function(print_config_flag flag msg) | ||||
|     if (flag) | ||||
|         message(STATUS "  ${msg}: Enabled") | ||||
|     else () | ||||
|         message(STATUS "  ${msg}: Disabled") | ||||
|     endif () | ||||
| endfunction() | ||||
| 
 | ||||
| # Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake | ||||
| function(string_pad RESULT_NAME DESIRED_LENGTH VALUE) | ||||
|     string(LENGTH "${VALUE}" VALUE_LENGTH) | ||||
|  | @ -26,6 +15,27 @@ endfunction() | |||
| set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.") | ||||
| mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH) | ||||
| 
 | ||||
| # print configuration variables with automatic padding | ||||
| # Usage: | ||||
| #   print_config(${GTSAM_BUILD_TESTS} "Build Tests") | ||||
| function(print_config config msg) | ||||
|   string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}") | ||||
|   message(STATUS "${padded_config}: ${msg}") | ||||
| endfunction() | ||||
| 
 | ||||
| # print configuration variable with enabled/disabled value | ||||
| # Usage: | ||||
| #   print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests                ") | ||||
| function(print_enabled_config config msg) | ||||
|     string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}") | ||||
|     if (config) | ||||
|         message(STATUS "${padded_msg}: Enabled") | ||||
|     else () | ||||
|         message(STATUS "${padded_msg}: Disabled") | ||||
|     endif () | ||||
| endfunction() | ||||
| 
 | ||||
| 
 | ||||
| # Print "  var: ${var}" padding with spaces as needed | ||||
| function(print_padded variable_name) | ||||
|   string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${variable_name}") | ||||
|  | @ -36,16 +46,16 @@ endfunction() | |||
| # Prints all the relevant CMake build options for a given target: | ||||
| function(print_build_options_for_target target_name_) | ||||
|   print_padded(GTSAM_COMPILE_FEATURES_PUBLIC) | ||||
|   print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE) | ||||
|   # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE) | ||||
|   print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC) | ||||
|   print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE) | ||||
|   # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE) | ||||
|   print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC) | ||||
| 
 | ||||
|   foreach(build_type ${GTSAM_CMAKE_CONFIGURATION_TYPES}) | ||||
|     string(TOUPPER "${build_type}" build_type_toupper) | ||||
|     print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper}) | ||||
|     # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper}) | ||||
|     print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC_${build_type_toupper}) | ||||
|     print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper}) | ||||
|     # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper}) | ||||
|     print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC_${build_type_toupper}) | ||||
|   endforeach() | ||||
| endfunction() | ||||
|  |  | |||
|  | @ -1,102 +0,0 @@ | |||
| # Setup cache options | ||||
| set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to choose the default version") | ||||
| set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Python compilation") | ||||
| set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python") | ||||
| if(NOT GTSAM_PYTHON_INSTALL_PATH) | ||||
|   set(GTSAM_PYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/borg/python") | ||||
| endif()  | ||||
| 
 | ||||
| #Author: Paul Furgale Modified by Andrew Melim | ||||
| function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY) | ||||
|   # # Boost | ||||
|   # find_package(Boost COMPONENTS python filesystem system REQUIRED) | ||||
|   # include_directories(${Boost_INCLUDE_DIRS}) | ||||
| 
 | ||||
|   # # Find Python | ||||
|   # FIND_PACKAGE(PythonLibs 2.7 REQUIRED) | ||||
|   # INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS}) | ||||
| 
 | ||||
|   IF(APPLE) | ||||
|     # The apple framework headers don't include the numpy headers for some reason. | ||||
|     GET_FILENAME_COMPONENT(REAL_PYTHON_INCLUDE ${PYTHON_INCLUDE_DIRS} REALPATH) | ||||
|     IF( ${REAL_PYTHON_INCLUDE} MATCHES Python.framework) | ||||
|       message("Trying to find extra headers for numpy from ${REAL_PYTHON_INCLUDE}.") | ||||
|       message("Looking in ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy") | ||||
|       FIND_PATH(NUMPY_INCLUDE_DIR arrayobject.h | ||||
|   ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy | ||||
|   ${REAL_PYTHON_INCLUDE}/numpy | ||||
|   ) | ||||
|       IF(${NUMPY_INCLUDE_DIR} MATCHES NOTFOUND) | ||||
|   message("Unable to find numpy include directories: ${NUMPY_INCLUDE_DIR}") | ||||
|       ELSE() | ||||
|   message("Found headers at ${NUMPY_INCLUDE_DIR}") | ||||
|   INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}) | ||||
|   INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}/..) | ||||
|       ENDIF() | ||||
|     ENDIF() | ||||
|   ENDIF(APPLE) | ||||
| 
 | ||||
|   if(MSVC) | ||||
|     add_library(${moduleName}_python MODULE ${ARGN}) | ||||
|     set_target_properties(${moduleName}_python PROPERTIES | ||||
|         OUTPUT_NAME         ${moduleName}_python | ||||
|         CLEAN_DIRECT_OUTPUT 1 | ||||
|         VERSION             1 | ||||
|         SOVERSION           0 | ||||
|         SUFFIX              ".pyd") | ||||
|       target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp | ||||
| 
 | ||||
|     set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>) | ||||
|     message(${PYLIB_OUTPUT_FILE}) | ||||
|     get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) | ||||
|     set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd) | ||||
| 
 | ||||
|   ELSE() | ||||
|     # Create a shared library | ||||
|     add_library(${moduleName}_python SHARED ${generated_cpp_file}) | ||||
| 
 | ||||
|     set_target_properties(${moduleName}_python PROPERTIES | ||||
|         OUTPUT_NAME         ${moduleName}_python | ||||
|         CLEAN_DIRECT_OUTPUT 1) | ||||
|       target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp | ||||
|     # On OSX and Linux, the python library must end in the extension .so. Build this | ||||
|     # filename here. | ||||
|     get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION) | ||||
|     set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>) | ||||
|     message(${PYLIB_OUTPUT_FILE}) | ||||
|     get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) | ||||
|     set(PYLIB_SO_NAME lib${moduleName}_python.so) | ||||
|   ENDIF(MSVC) | ||||
| 
 | ||||
|   # Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package | ||||
|   set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam) | ||||
|   # Cause the library to be output in the correct directory. | ||||
|   add_custom_command(TARGET ${moduleName}_python | ||||
|     POST_BUILD | ||||
|     COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} | ||||
|     WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} | ||||
|     COMMENT "Copying library files to python directory" ) | ||||
| 
 | ||||
|   # Cause the library to be output in the correct directory. | ||||
|   add_custom_command(TARGET ${TARGET_NAME} | ||||
|     POST_BUILD | ||||
|     COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} | ||||
|     WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} | ||||
|     COMMENT "Copying library files to python directory" ) | ||||
| 
 | ||||
|   get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES) | ||||
|   list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}) | ||||
|   set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}")  | ||||
| endfunction(wrap_python) | ||||
| 
 | ||||
| # Macro to get list of subdirectories | ||||
| macro(SUBDIRLIST result curdir) | ||||
|   file(GLOB children RELATIVE ${curdir} ${curdir}/*) | ||||
|   set(dirlist "") | ||||
|   foreach(child ${children}) | ||||
|     if(IS_DIRECTORY ${curdir}/${child}) | ||||
|         list(APPEND dirlist ${child}) | ||||
|     endif() | ||||
|   endforeach() | ||||
|   set(${result} ${dirlist}) | ||||
| endmacro() | ||||
|  | @ -0,0 +1,34 @@ | |||
| # Build list of possible allocators | ||||
| set(possible_allocators "") | ||||
| if(GTSAM_USE_TBB) | ||||
|     list(APPEND possible_allocators TBB) | ||||
|     set(preferred_allocator TBB) | ||||
| else() | ||||
|     list(APPEND possible_allocators BoostPool STL) | ||||
|     set(preferred_allocator STL) | ||||
| endif() | ||||
| if(GOOGLE_PERFTOOLS_FOUND) | ||||
|     list(APPEND possible_allocators tcmalloc) | ||||
| endif() | ||||
| 
 | ||||
| # Check if current allocator choice is valid and set cache option | ||||
| list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) | ||||
| if(allocator_valid EQUAL -1) | ||||
|     set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) | ||||
| else() | ||||
|     set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") | ||||
| endif() | ||||
| set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) | ||||
| mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) | ||||
| 
 | ||||
| # Define compile flags depending on allocator | ||||
| if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") | ||||
|     set(GTSAM_ALLOCATOR_BOOSTPOOL 1) | ||||
| elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") | ||||
|     set(GTSAM_ALLOCATOR_STL 1) | ||||
| elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") | ||||
|     set(GTSAM_ALLOCATOR_TBB 1) | ||||
| elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") | ||||
|     set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator | ||||
|     list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") | ||||
| endif() | ||||
|  | @ -0,0 +1,56 @@ | |||
| ############################################################################### | ||||
| # Find boost | ||||
| 
 | ||||
| # To change the path for boost, you will need to set: | ||||
| # BOOST_ROOT: path to install prefix for boost | ||||
| # Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT | ||||
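| # For example (illustrative prefix only; adjust to wherever Boost is installed): | ||||
| #   cmake -DBOOST_ROOT=/opt/boost_1_65_1 -DBoost_NO_SYSTEM_PATHS=ON .. | ||||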
| 
 | ||||
| if(MSVC) | ||||
|     # By default, boost only builds static libraries on windows | ||||
|     set(Boost_USE_STATIC_LIBS ON)  # only find static libs | ||||
|     # If we ever stop forcing static Boost libs on Windows above, ... | ||||
|     # If we use Boost shared libs, disable auto linking. | ||||
|     # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. | ||||
|     if(NOT Boost_USE_STATIC_LIBS) | ||||
|         list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) | ||||
|     endif() | ||||
|     # Virtual memory range for PCH exceeded on VS2015 | ||||
|     if(MSVC_VERSION LESS 1910) # older than VS2017 | ||||
|       list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) | ||||
|     endif() | ||||
| endif() | ||||
| 
 | ||||
| 
 | ||||
| # Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. | ||||
| set(BOOST_FIND_MINIMUM_VERSION 1.58) | ||||
| set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex) | ||||
| 
 | ||||
| find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS}) | ||||
| 
 | ||||
| # Required components | ||||
| if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR | ||||
|     NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY) | ||||
|   message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.") | ||||
| endif() | ||||
| 
 | ||||
| option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF) | ||||
| # Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library) | ||||
| set(GTSAM_BOOST_LIBRARIES | ||||
|   Boost::serialization | ||||
|   Boost::system | ||||
|   Boost::filesystem | ||||
|   Boost::thread | ||||
|   Boost::date_time | ||||
|   Boost::regex | ||||
| ) | ||||
| if (GTSAM_DISABLE_NEW_TIMERS) | ||||
|     message("WARNING:  GTSAM timing instrumentation manually disabled") | ||||
|     list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC GTSAM_DISABLE_NEW_TIMERS) # compile definitions take the macro name without a -D/D prefix | ||||
| else() | ||||
|     if(Boost_TIMER_LIBRARY) | ||||
|       list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono) | ||||
|     else() | ||||
|       list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt | ||||
|       message("WARNING:  GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.") | ||||
|     endif() | ||||
| endif() | ||||
|  | @ -0,0 +1,14 @@ | |||
| ############################################################################### | ||||
| # Support ccache, if installed | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|     find_program(CCACHE_FOUND ccache) | ||||
|     if(CCACHE_FOUND) | ||||
|         if(GTSAM_BUILD_WITH_CCACHE) | ||||
|             set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) | ||||
|             set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) | ||||
|         else() | ||||
|             set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") | ||||
|             set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") | ||||
|         endif() | ||||
|     endif(CCACHE_FOUND) | ||||
| endif() | ||||
|  | @ -0,0 +1,28 @@ | |||
| #JLBC: is all this actually used by someone? could it be removed? | ||||
| 
 | ||||
| # Flags for choosing default packaging tools | ||||
| set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") | ||||
| set(CPACK_GENERATOR        "TGZ" CACHE STRING "CPack Default Binary Generator") | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Set up CPack | ||||
| set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM") | ||||
| set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology") | ||||
| set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu") | ||||
| set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md") | ||||
| set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") | ||||
| set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) | ||||
| set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR}) | ||||
| set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH}) | ||||
| set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}") | ||||
| #set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory | ||||
| #set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error | ||||
| set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$") | ||||
| set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/") | ||||
| set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/") | ||||
| set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") | ||||
| #set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs | ||||
| 
 | ||||
| # Deb-package specific cpack | ||||
| set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev") | ||||
| set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)") | ||||
|  | @ -0,0 +1,77 @@ | |||
| ############################################################################### | ||||
| # Option for using system Eigen or GTSAM-bundled Eigen | ||||
| 
 | ||||
| option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF) | ||||
| 
 | ||||
| if(NOT GTSAM_USE_SYSTEM_EIGEN) | ||||
|   # This option only makes sense if using the embedded copy of Eigen; it is | ||||
|   # used to decide whether to *install* the "unsupported" module: | ||||
|   option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) | ||||
| endif() | ||||
| 
 | ||||
| # Switch for using system Eigen or GTSAM-bundled Eigen | ||||
| if(GTSAM_USE_SYSTEM_EIGEN) | ||||
|     find_package(Eigen3 REQUIRED) | ||||
| 
 | ||||
|     # Use generic Eigen include paths e.g. <Eigen/Core> | ||||
|     set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") | ||||
| 
 | ||||
|     # check if MKL is also enabled - can have one or the other, but not both! | ||||
|     # Note: Eigen >= v3.2.5 includes our patches | ||||
|     if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) | ||||
|       message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") | ||||
|     endif() | ||||
| 
 | ||||
|     # Check for Eigen version which doesn't work with MKL | ||||
|     # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. | ||||
|     if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) | ||||
|         message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") | ||||
|     endif() | ||||
| 
 | ||||
|     # The actual include directory (for BUILD cmake target interface): | ||||
|     set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") | ||||
| else() | ||||
|     # Use bundled Eigen include path. | ||||
|     # Clear any variables set by FindEigen3 | ||||
|     if(EIGEN3_INCLUDE_DIR) | ||||
|         set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) | ||||
|     endif() | ||||
| 
 | ||||
|     # set full path to be used by external projects | ||||
|     # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in | ||||
|     set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") | ||||
| 
 | ||||
|     # The actual include directory (for BUILD cmake target interface): | ||||
|     set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") | ||||
| endif() | ||||
| 
 | ||||
| # Detect Eigen version: | ||||
| set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") | ||||
| if (EXISTS ${EIGEN_VER_H}) | ||||
|     file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) | ||||
| 
 | ||||
|     # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION  XX", etc... | ||||
| 
 | ||||
|     string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") | ||||
|     string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") | ||||
| 
 | ||||
|     string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") | ||||
|     string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") | ||||
| 
 | ||||
|     string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") | ||||
|     string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") | ||||
| 
 | ||||
|     set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") | ||||
| 
 | ||||
|     message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") | ||||
| else() | ||||
|     message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") | ||||
| endif () | ||||
| 
 | ||||
| if (MSVC) | ||||
|     if (BUILD_SHARED_LIBS) | ||||
|         # mute eigen static assert to avoid errors in shared lib | ||||
|         list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) | ||||
|     endif() | ||||
|     list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen | ||||
| endif() | ||||
|  | @ -0,0 +1,10 @@ | |||
| # Print warnings at the end | ||||
| if(GTSAM_WITH_TBB AND NOT TBB_FOUND) | ||||
|     message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled.  Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") | ||||
| endif() | ||||
| if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND) | ||||
|     message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled.  Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning.  See INSTALL.md for notes on performance.") | ||||
| endif() | ||||
| if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND) | ||||
|     message(WARNING "Your compiler does not support OpenMP.  Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.") | ||||
| endif() | ||||
|  | @ -0,0 +1,45 @@ | |||
| ############################################################################### | ||||
| # Set up options | ||||
| 
 | ||||
| # See whether gtsam_unstable is available (it will be present only if we're using a git checkout) | ||||
| if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable") | ||||
|     set(GTSAM_UNSTABLE_AVAILABLE 1) | ||||
| else() | ||||
|     set(GTSAM_UNSTABLE_AVAILABLE 0) | ||||
| endif() | ||||
| 
 | ||||
| # Configurable Options | ||||
| if(GTSAM_UNSTABLE_AVAILABLE) | ||||
|     option(GTSAM_BUILD_UNSTABLE              "Enable/Disable libgtsam_unstable"          ON) | ||||
|     option(GTSAM_UNSTABLE_BUILD_PYTHON       "Enable/Disable Python wrapper for libgtsam_unstable" ON) | ||||
|     option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) | ||||
| endif() | ||||
| option(BUILD_SHARED_LIBS                 "Build shared gtsam library, instead of static" ON) | ||||
| option(GTSAM_USE_QUATERNIONS             "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enabled, Rot3::EXPMAP is enforced by default." OFF) | ||||
| option(GTSAM_POSE3_EXPMAP                "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) | ||||
| option(GTSAM_ROT3_EXPMAP                 "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) | ||||
| option(GTSAM_ENABLE_CONSISTENCY_CHECKS   "Enable/Disable expensive consistency checks"       OFF) | ||||
| option(GTSAM_WITH_TBB                    "Use Intel Threaded Building Blocks (TBB) if available" ON) | ||||
| option(GTSAM_WITH_EIGEN_MKL              "Eigen will use Intel MKL if available" OFF) | ||||
| option(GTSAM_WITH_EIGEN_MKL_OPENMP       "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) | ||||
| option(GTSAM_THROW_CHEIRALITY_EXCEPTION  "Throw exception when a triangulated point is behind a camera" ON) | ||||
| option(GTSAM_BUILD_PYTHON                "Enable/Disable building & installation of Python module with pybind11" OFF) | ||||
| option(GTSAM_INSTALL_MATLAB_TOOLBOX      "Enable/Disable installation of matlab toolbox"  OFF) | ||||
| option(GTSAM_ALLOW_DEPRECATED_SINCE_V41  "Allow use of methods/functions deprecated in GTSAM 4.1" ON) | ||||
| option(GTSAM_SUPPORT_NESTED_DISSECTION   "Support Metis-based nested dissection" ON) | ||||
| option(GTSAM_TANGENT_PREINTEGRATION      "Use new ImuFactor with integration on tangent space" ON) | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|     option(GTSAM_BUILD_WITH_CCACHE           "Use ccache compiler cache" ON) | ||||
| endif() | ||||
| 
 | ||||
| # Enable GTSAM_ROT3_EXPMAP if GTSAM_POSE3_EXPMAP is enabled, and vice versa. | ||||
| if(GTSAM_POSE3_EXPMAP) | ||||
|     message(STATUS "GTSAM_POSE3_EXPMAP=ON, enabling GTSAM_ROT3_EXPMAP as well") | ||||
|     set(GTSAM_ROT3_EXPMAP 1 CACHE BOOL "" FORCE) | ||||
| elseif(GTSAM_ROT3_EXPMAP) | ||||
|     message(STATUS "GTSAM_ROT3_EXPMAP=ON, enabling GTSAM_POSE3_EXPMAP as well") | ||||
|     set(GTSAM_POSE3_EXPMAP 1 CACHE BOOL "" FORCE) | ||||
| endif() | ||||
| 
 | ||||
| # Set the default Python version. This is later updated in HandlePython.cmake. | ||||
| set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.") | ||||
|  | @ -0,0 +1,52 @@ | |||
| # JLBC: These should ideally be ported to "modern cmake" via target properties. | ||||
| # | ||||
| 
 | ||||
| if (CMAKE_GENERATOR STREQUAL "Ninja" AND | ||||
|     ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR | ||||
|      (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5))) | ||||
|     # Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support. | ||||
|     # Rationale in https://github.com/ninja-build/ninja/issues/814 | ||||
|     add_compile_options(-fdiagnostics-color=always) | ||||
| endif() | ||||
| 
 | ||||
| 
 | ||||
| # If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() | ||||
| # or explicit instantiation will generate build errors. | ||||
| # See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 | ||||
| # | ||||
| if(MSVC AND BUILD_SHARED_LIBS) | ||||
|     list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) | ||||
| endif() | ||||
| 
 | ||||
| if (APPLE AND BUILD_SHARED_LIBS) | ||||
|     # Set the default install directory on macOS | ||||
|     set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Global compile options | ||||
| 
 | ||||
| if(MSVC) | ||||
|     list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) | ||||
|     list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings | ||||
|     list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code | ||||
| endif() | ||||
| 
 | ||||
| # GCC 4.8+ complains about local typedefs which we use for shared_ptr etc. | ||||
| if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") | ||||
|   if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8) | ||||
|     list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) | ||||
|   endif() | ||||
| endif() | ||||
| 
 | ||||
| # As of XCode 7, clang also complains about this | ||||
| if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") | ||||
|   if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) | ||||
|     list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) | ||||
|   endif() | ||||
| endif() | ||||
| 
 | ||||
| if(GTSAM_ENABLE_CONSISTENCY_CHECKS) | ||||
|   # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h | ||||
|   list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS) | ||||
| endif() | ||||
|  | @ -0,0 +1,17 @@ | |||
| ############################################################################### | ||||
| # Find MKL | ||||
| find_package(MKL) | ||||
| 
 | ||||
| if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL) | ||||
|     set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h | ||||
|     set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL | ||||
|     list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES}) | ||||
| 
 | ||||
|     # --no-as-needed is required with gcc according to the MKL link advisor | ||||
|     if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") | ||||
|         set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed") | ||||
|     endif() | ||||
| else() | ||||
|     set(GTSAM_USE_EIGEN_MKL 0) | ||||
|     set(EIGEN_USE_MKL_ALL 0) | ||||
| endif() | ||||
|  | @ -0,0 +1,11 @@ | |||
| 
 | ||||
| ############################################################################### | ||||
| # Find OpenMP (if we're also using MKL) | ||||
| find_package(OpenMP)  # do this here to generate correct message if disabled | ||||
| 
 | ||||
| if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) | ||||
|     if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP) | ||||
|         set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h | ||||
|         list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS}) | ||||
|     endif() | ||||
| endif() | ||||
|  | @ -0,0 +1,4 @@ | |||
| 
 | ||||
| ############################################################################### | ||||
| # Find Google perftools | ||||
| find_package(GooglePerfTools) | ||||
|  | @ -0,0 +1,104 @@ | |||
| ############################################################################### | ||||
| # Print configuration variables | ||||
| message(STATUS "===============================================================") | ||||
| message(STATUS "================  Configuration Options  ======================") | ||||
| print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}") | ||||
| print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}") | ||||
| print_config("CMake version"    "${CMAKE_VERSION}") | ||||
| print_config("CMake generator"  "${CMAKE_GENERATOR}") | ||||
| print_config("CMake build tool" "${CMAKE_BUILD_TOOL}") | ||||
| message(STATUS "Build flags                                               ") | ||||
| print_enabled_config(${GTSAM_BUILD_TESTS}                 "Build Tests") | ||||
| print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS}       "Build examples with 'make all'") | ||||
| print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS}         "Build timing scripts with 'make all'") | ||||
| if (DOXYGEN_FOUND) | ||||
|     print_enabled_config(${GTSAM_BUILD_DOCS}              "Build Docs") | ||||
| endif() | ||||
| print_enabled_config(${BUILD_SHARED_LIBS}                 "Build shared GTSAM libraries") | ||||
| print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES}        "Put build type in library name") | ||||
| if(GTSAM_UNSTABLE_AVAILABLE) | ||||
|     print_enabled_config(${GTSAM_BUILD_UNSTABLE}          "Build libgtsam_unstable        ") | ||||
|     print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON}   "Build GTSAM unstable Python    ") | ||||
|     print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable") | ||||
| endif() | ||||
| 
 | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|     print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE}     "Build for native architecture  ") | ||||
|     print_config("Build type" "${CMAKE_BUILD_TYPE}") | ||||
|     print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") | ||||
|     print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") | ||||
| endif() | ||||
| 
 | ||||
| print_build_options_for_target(gtsam) | ||||
| 
 | ||||
| print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") | ||||
| 
 | ||||
| if(GTSAM_USE_TBB) | ||||
|     print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})") | ||||
| elseif(TBB_FOUND) | ||||
|     print_config("Use Intel TBB" "TBB (Version: ${TBB_VERSION}) found but GTSAM_WITH_TBB is disabled") | ||||
| else() | ||||
|     print_config("Use Intel TBB" "TBB not found") | ||||
| endif() | ||||
| if(GTSAM_USE_EIGEN_MKL) | ||||
|     print_config("Eigen will use MKL" "Yes") | ||||
| elseif(MKL_FOUND) | ||||
|     print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| else() | ||||
|     print_config("Eigen will use MKL" "MKL not found") | ||||
| endif() | ||||
| if(GTSAM_USE_EIGEN_MKL_OPENMP) | ||||
|     print_config("Eigen will use MKL and OpenMP" "Yes") | ||||
| elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) | ||||
|     print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| elseif(OPENMP_FOUND AND NOT MKL_FOUND) | ||||
|     print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found") | ||||
| elseif(OPENMP_FOUND) | ||||
|     print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") | ||||
| else() | ||||
|     print_config("Eigen will use MKL and OpenMP" "OpenMP not found") | ||||
| endif() | ||||
| print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}") | ||||
| 
 | ||||
| if(GTSAM_THROW_CHEIRALITY_EXCEPTION) | ||||
|     print_config("Cheirality exceptions enabled" "YES") | ||||
| else() | ||||
|     print_config("Cheirality exceptions enabled" "NO") | ||||
| endif() | ||||
| 
 | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
|     if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) | ||||
|         print_config("Build with ccache" "Yes") | ||||
|     elseif(CCACHE_FOUND) | ||||
|         print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") | ||||
|     else() | ||||
|         print_config("Build with ccache" "No") | ||||
|     endif() | ||||
| endif() | ||||
| 
 | ||||
| message(STATUS "Packaging flags") | ||||
| print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}") | ||||
| print_config("CPack Generator" "${CPACK_GENERATOR}") | ||||
| 
 | ||||
| message(STATUS "GTSAM flags                                               ") | ||||
| print_enabled_config(${GTSAM_USE_QUATERNIONS}             "Quaternions as default Rot3     ") | ||||
| print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS}   "Runtime consistency checking    ") | ||||
| print_enabled_config(${GTSAM_ROT3_EXPMAP}                 "Rot3 retract is full ExpMap     ") | ||||
| print_enabled_config(${GTSAM_POSE3_EXPMAP}                "Pose3 retract is full ExpMap    ") | ||||
| print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41}  "Allow features deprecated in GTSAM 4.1") | ||||
| print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION}   "Metis-based Nested Dissection   ") | ||||
| print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION}      "Use tangent-space preintegration") | ||||
| 
 | ||||
| message(STATUS "MATLAB toolbox flags") | ||||
| print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX}      "Install MATLAB toolbox          ") | ||||
| if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) | ||||
|     print_config("MATLAB root" "${MATLAB_ROOT}") | ||||
|     print_config("MEX binary" "${MEX_COMMAND}") | ||||
| endif() | ||||
| message(STATUS "Python toolbox flags                                      ") | ||||
| print_enabled_config(${GTSAM_BUILD_PYTHON}                "Build Python module with pybind ") | ||||
| if(GTSAM_BUILD_PYTHON) | ||||
|     print_config("Python version" ${GTSAM_PYTHON_VERSION}) | ||||
| endif() | ||||
| 
 | ||||
| message(STATUS "===============================================================") | ||||
|  | @ -0,0 +1,29 @@ | |||
| # Set Python version if either Python or MATLAB wrapper is requested. | ||||
| if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX) | ||||
|     if(${GTSAM_PYTHON_VERSION} STREQUAL "Default") | ||||
|         # Get info about the Python3 interpreter | ||||
|         # https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3 | ||||
|         find_package(Python3 COMPONENTS Interpreter Development) | ||||
| 
 | ||||
|         if(NOT ${Python3_FOUND}) | ||||
|             message(FATAL_ERROR "Cannot find Python3 interpreter. Please install Python >= 3.6.") | ||||
|         endif() | ||||
| 
 | ||||
|         set(GTSAM_PYTHON_VERSION "${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}" | ||||
|                 CACHE | ||||
|                 STRING | ||||
|                 "The version of Python to build the wrappers against." | ||||
|                 FORCE) | ||||
|     endif() | ||||
| endif() | ||||
| 
 | ||||
| if(GTSAM_BUILD_PYTHON) | ||||
|     if(GTSAM_UNSTABLE_BUILD_PYTHON) | ||||
|         if (NOT GTSAM_BUILD_UNSTABLE) | ||||
|             message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.") | ||||
|             set(GTSAM_UNSTABLE_BUILD_PYTHON OFF) | ||||
|         endif() | ||||
|     endif() | ||||
| 
 | ||||
|     set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python") | ||||
| endif() | ||||
|  | @ -0,0 +1,24 @@ | |||
| ############################################################################### | ||||
| # Find TBB | ||||
| find_package(TBB 4.4 COMPONENTS tbb tbbmalloc) | ||||
| 
 | ||||
| # Set up variables if we're using TBB | ||||
| if(TBB_FOUND AND GTSAM_WITH_TBB) | ||||
|     set(GTSAM_USE_TBB 1)  # This will go into config.h | ||||
|     if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020)) | ||||
|         set(TBB_GREATER_EQUAL_2020 1) | ||||
|     else() | ||||
|         set(TBB_GREATER_EQUAL_2020 0) | ||||
|     endif() | ||||
|     # all definitions and link requisites will go via imported targets: | ||||
|     # tbb & tbbmalloc | ||||
|     list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc) | ||||
| else() | ||||
|     set(GTSAM_USE_TBB 0)  # This will go into config.h | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Prohibit Timing build mode in combination with TBB | ||||
| if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE  STREQUAL "Timing")) | ||||
|       message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.") | ||||
| endif() | ||||
|  | @ -0,0 +1,10 @@ | |||
| # ---------------------------------------------------------------------------- | ||||
| #   Uninstall target, for "make uninstall" | ||||
| # ---------------------------------------------------------------------------- | ||||
| configure_file( | ||||
|   "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in" | ||||
|   "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" | ||||
|   IMMEDIATE @ONLY) | ||||
| 
 | ||||
| add_custom_target(uninstall | ||||
|   "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake") | ||||
|  | @ -47,9 +47,14 @@ | |||
| #    endif | ||||
| #  endif | ||||
| #else | ||||
| #ifdef __APPLE__ | ||||
| #  define @library_name@_EXPORT __attribute__((visibility("default"))) | ||||
| #  define @library_name@_EXTERN_EXPORT extern | ||||
| #else | ||||
| #  define @library_name@_EXPORT | ||||
| #  define @library_name@_EXTERN_EXPORT extern | ||||
| #endif | ||||
| #endif | ||||
| 
 | ||||
| #undef BUILD_SHARED_LIBS | ||||
| 
 | ||||
|  |  | |||
|  | @ -33,7 +33,6 @@ | |||
| // Here we will use Between factors for the relative motion described by odometry measurements.
 | ||||
| // We will also use a Between Factor to encode the loop closure constraint
 | ||||
| // Also, we will initialize the robot at the origin using a Prior factor.
 | ||||
| #include <gtsam/slam/PriorFactor.h> | ||||
| #include <gtsam/slam/BetweenFactor.h> | ||||
| 
 | ||||
| // When the factors are created, we will add them to a Factor Graph. As the factors we are using
 | ||||
|  | @ -69,7 +68,7 @@ int main(int argc, char** argv) { | |||
|   // 2a. Add a prior on the first pose, setting it to the origin
 | ||||
|   // A prior factor consists of a mean and a noise model (covariance matrix)
 | ||||
|   noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1)); | ||||
|   graph.emplace_shared<PriorFactor<Pose2> >(1, Pose2(0, 0, 0), priorNoise); | ||||
|   graph.addPrior(1, Pose2(0, 0, 0), priorNoise); | ||||
| 
 | ||||
|   // For simplicity, we will use the same noise model for odometry and loop closures
 | ||||
|   noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1)); | ||||
|  |  | |||
|  | @ -1,43 +0,0 @@ | |||
| # Install cython components | ||||
| include(GtsamCythonWrap) | ||||
| 
 | ||||
| # Create the cython toolbox for the gtsam library | ||||
| if (GTSAM_INSTALL_CYTHON_TOOLBOX) | ||||
|   # build and include the bundled version of eigency (gtsam_eigency) | ||||
|   add_subdirectory(gtsam_eigency) | ||||
|   include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency) | ||||
| 
 | ||||
|   # wrap gtsam | ||||
|   add_custom_target(gtsam_header DEPENDS "../gtsam.h") | ||||
|   wrap_and_install_library_cython("../gtsam.h" # interface_header | ||||
|                                   ""                  # extra imports | ||||
|                                   "${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path | ||||
|                                   gtsam  # library to link with | ||||
|                                   "wrap;cythonize_eigency;gtsam;gtsam_header"  # dependencies which need to be built before wrapping | ||||
|                                   ) | ||||
| 
 | ||||
|   # wrap gtsam_unstable | ||||
|   if(GTSAM_BUILD_UNSTABLE) | ||||
|     add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h") | ||||
|     wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header | ||||
|                                     "from gtsam.gtsam cimport *"                  # extra imports | ||||
|                                     "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path | ||||
|                                     gtsam_unstable  # library to link with | ||||
|                                     "gtsam_unstable;gtsam_unstable_header;cythonize_gtsam"  # dependencies to be built before wrapping | ||||
|                                     ) | ||||
|   endif() | ||||
| 
 | ||||
|   file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS) | ||||
|   file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS) | ||||
| 
 | ||||
|   # Install the custom-generated __init__.py | ||||
|   # This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable | ||||
|   configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY) | ||||
|   configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY) | ||||
|   configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py) | ||||
|   install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}") | ||||
|   # install scripts and tests | ||||
|   install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py") | ||||
|   install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py") | ||||
| 
 | ||||
| endif () | ||||
| cython/README.md (155 deleted lines) | ||||
							|  | @ -1,155 +0,0 @@ | |||
| # Python Wrapper | ||||
| 
 | ||||
| This is the Cython/Python wrapper around the GTSAM C++ library. | ||||
| 
 | ||||
| ## Install | ||||
| 
 | ||||
| - if you want to build the gtsam python library for a specific python version (e.g. 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake`; otherwise the default interpreter will be used (a configure sketch is shown after the snippet below). | ||||
|     - If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam. | ||||
| - This wrapper needs Cython (>=0.25.2), backports_abc (>=0.5), and numpy. These can be installed as follows: | ||||
| 
 | ||||
| ```bash | ||||
|  pip install -r <gtsam_folder>/cython/requirements.txt | ||||
| ``` | ||||
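| 
 | ||||
| A minimal configure sketch putting the above together (the build directory name and the Python version are placeholders; adjust them to your setup): | ||||
| 
 | ||||
| ```bash | ||||
| mkdir build && cd build | ||||
| cmake .. -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON -DGTSAM_PYTHON_VERSION=2.7 | ||||
| make -j4 install | ||||
| ``` | ||||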
| 
 | ||||
| - For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git), | ||||
| named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy. | ||||
| 
 | ||||
| - Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled. | ||||
| The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is | ||||
| by default: `<your CMAKE_INSTALL_PREFIX>/cython` | ||||
| 
 | ||||
| - To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`: | ||||
| ```bash | ||||
| export PYTHONPATH=$PYTHONPATH:<GTSAM_CYTHON_INSTALL_PATH> | ||||
| ``` | ||||
| - To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install` (see the sketch after this list) | ||||
|     - (the same command can be used to install into a virtual environment if it is active) | ||||
|     - note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory. | ||||
|     - if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`. | ||||
|       Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package. | ||||
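| 
 | ||||
| A sketch of that system-wide flow (the toolbox path is a placeholder; by default it is `<your CMAKE_INSTALL_PREFIX>/cython`): | ||||
| ```bash | ||||
| make install                         # run from your build directory | ||||
| cd <your GTSAM_CYTHON_INSTALL_PATH>  # i.e. the installed cython toolbox folder | ||||
| python setup.py install              # installs the gtsam package for the active interpreter | ||||
| ``` | ||||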
| 
 | ||||
| ## Unit Tests | ||||
| 
 | ||||
| The Cython toolbox also has a small set of unit tests located in the | ||||
| test directory. To run them: | ||||
| 
 | ||||
| ```bash | ||||
|  cd <your GTSAM_CYTHON_INSTALL_PATH> | ||||
|  python -m unittest discover | ||||
| ``` | ||||
| 
 | ||||
| ## Writing Your Own Scripts | ||||
| 
 | ||||
| See the tests for examples. | ||||
| 
 | ||||
| ### Some Important Notes: | ||||
| 
 | ||||
| - Vector/Matrix: | ||||
|   + GTSAM expects double-precision floating point vectors and matrices. | ||||
|     Hence, you should pass numpy matrices with dtype=float, or 'float64'. | ||||
|   + Also, GTSAM expects *column-major* matrices, unlike the default storage | ||||
|     scheme in numpy. Hence, you should pass column-major matrices to gtsam using | ||||
|     the flag order='F'. And you always get column-major matrices back. | ||||
|     For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed | ||||
|   + Passing row-major matrices of different dtype, e.g. 'int', will also work | ||||
|     as the wrapper converts them to column-major and dtype float for you, | ||||
|     using numpy.array.astype(float, order='F', copy=False). | ||||
|     However, this will result in a copy if your matrix is not already in the | ||||
|     expected dtype and storage order. | ||||
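|   + For example, a small illustrative snippet of the conventions above (the values are arbitrary): | ||||
| ```Python | ||||
|       import numpy as np | ||||
|       # Column-major ('F'), double-precision matrix, as GTSAM expects: | ||||
|       M = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=float, order='F') | ||||
|       # A row-major integer matrix also works; the wrapper converts it (paying a copy): | ||||
|       A = np.arange(6).reshape(2, 3)                  # dtype=int, C order | ||||
|       A_f = A.astype(float, order='F', copy=False)    # what the wrapper effectively does | ||||
| ``` | ||||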
| 
 | ||||
| - Inner namespace: Classes in an inner namespace will be prefixed by <innerNamespace>_ in Python. | ||||
| Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey | ||||
| 
 | ||||
| - Casting from a base class to a derived class must be done explicitly. | ||||
| Examples: | ||||
| ```Python | ||||
|       noiseBase = factor.noiseModel() | ||||
|       noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase) | ||||
| ``` | ||||
| 
 | ||||
| ## Wrapping Your Own Project That Uses GTSAM | ||||
| 
 | ||||
| - Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH} | ||||
|   + so that it can find gtsam Cython header: gtsam/gtsam.pxd | ||||
| 
 | ||||
| - In your CMakeList.txt | ||||
| ```cmake | ||||
| find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH | ||||
| set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools") | ||||
| 
 | ||||
| # Wrap | ||||
| include(GtsamCythonWrap) | ||||
| include_directories(${GTSAM_EIGENCY_INSTALL_PATH}) | ||||
| wrap_and_install_library_cython("your_project_interface.h" | ||||
|                                 "from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header | ||||
|                                 "your_install_path" | ||||
|                                 "libraries_to_link_with_the_cython_module" | ||||
|                                 "dependencies_which_need_to_be_built_before_the_wrapper" | ||||
|                                 ) | ||||
| #Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake. | ||||
| ``` | ||||
| 
 | ||||
| ## KNOWN ISSUES | ||||
| 
 | ||||
|   - Doesn't work with python3 installed from homebrew | ||||
|     - size-related issue: can only wrap up to a certain number of classes: up to mEstimator! | ||||
|     - Guess: 64 vs 32 bit? distutils compiler flags? | ||||
|   - Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be cast to FastVector<Key> | ||||
|     - Upgrading to 0.25 solves the problem | ||||
|   - Need default constructor and default copy constructor for almost every class... :( | ||||
|     - support these constructors by default and declare "delete" for special classes? | ||||
| 
 | ||||
| 
 | ||||
| ### TODO | ||||
| 
 | ||||
| - [ ] allow duplication of parent's functions in child classes. Not allowed for now due to conflicts in Cython. | ||||
| - [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?) | ||||
| - [ ] inner namespaces ==> inner packages? | ||||
| - [ ] Wrap fixed-size Matrices/Vectors? | ||||
| 
 | ||||
| 
 | ||||
| ### Completed/Cancelled: | ||||
| 
 | ||||
| - [x] Fix Python tests: don't use " import <package> * ": Bad style!!! (18-03-17 19:50) | ||||
| - [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files | ||||
| - [x] Wrap unstable @done (18-03-17 15:30) | ||||
| - [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30) | ||||
| - [x] 18-03-17: managed to unify the two versions by removing std container stubs from the matlab version, and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem. | ||||
| - [x] 06-03-17: managed to remove the requirements for default and copy constructors | ||||
| - [ ] 25-11-16: Tried to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython ignore Key as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>. | ||||
| - [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: ```gtsam::JointMarginal __pyx_t_1;```  Users don't have to wrap this constructor, however. | ||||
| - [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00) | ||||
| - [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30) | ||||
| - [x] CMake install script @done (25-11-16 02:30) | ||||
| - [ ] [REFACTOR] better name for uninstantiateClass: very vague!! @cancelled (25-11-16 02:30) -- lazy | ||||
| - [ ] forward declaration? @cancelled (23-11-16 13:00) - nothing to do, seems to work? | ||||
| - [x] wrap VariableIndex: why is it in inference? If need to, shouldn't have constructors to specific FactorGraphs @done (23-11-16 13:00) | ||||
| - [x] Global functions @done (22-11-16 21:00) | ||||
| - [x] [REFACTOR] typesEqual --> isSameSignature @done (22-11-16 21:00) | ||||
| - [x] Proper overloads (constructors, static methods, methods) @done (20-11-16 21:00) | ||||
| - [x] Allow overloading methods. The current solution is annoying!!! @done (20-11-16 21:00) | ||||
| - [x] Casting from parent and grandparents @done (16-11-16 17:00) | ||||
| - [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00) | ||||
| - [x] Support "print obj" @done (16-11-16 17:00) | ||||
| - [x] methods for FastVector: at, [], ...  @done (16-11-16 17:00) | ||||
| - [x] Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34) | ||||
| - [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19) | ||||
| - [x] [Nice to have] parse typedef @done (16-09-13 17:19) | ||||
| - [x] ctypedef at correct places @done (16-09-12 18:34) | ||||
| - [x] expand template variable type in constructor/static methods? @done (16-09-12 18:34) | ||||
| - [x] NonlinearOptimizer: copy constructor deleted!!! @done (16-09-13 17:20) | ||||
| - [x] Value: no default constructor @done (16-09-13 17:20) | ||||
| - [x] ctypedef PriorFactor[Vector] PriorFactorVector @done (16-09-19 12:25) | ||||
| - [x] Delete duplicate methods in derived class @done (16-09-12 13:38) | ||||
| - [x] Fix return properly @done (16-09-11 17:14) | ||||
| - [x] handle pair @done (16-09-11 17:14) | ||||
| - [x] Eigency: ambiguous call: A(const T&) A(const Vector& v) and Eigency A(Map[Vector]& v) @done (16-09-11 07:59) | ||||
| - [x] Eigency: Constructor: ambiguous construct from Vector/Matrix @done (16-09-11 07:59) | ||||
| - [x] Eigency: Fix method template of Vector/Matrix: template argument is [Vector] while argument is Map[Vector] @done (16-09-11 08:22) | ||||
| - [x] Robust noise: copy assignment operator is deleted because of shared_ptr of the abstract Base class @done (16-09-10 09:05) | ||||
| - [ ] Cython: Constructor: generate default constructor? (hack: if it's serializable?) @cancelled (16-09-13 17:20) | ||||
| - [ ] Eigency: Map[] to Block @created(16-09-10 07:59) @cancelled (16-09-11 08:28) | ||||
| 
 | ||||
| - inference before symbolic/linear | ||||
| - what's the purpose of "virtual" ?? | ||||
|  | @ -1,26 +0,0 @@ | |||
| from .gtsam import * | ||||
| 
 | ||||
| try: | ||||
|     import gtsam_unstable | ||||
| 
 | ||||
| 
 | ||||
|     def _deprecated_wrapper(item, name): | ||||
|         def wrapper(*args, **kwargs): | ||||
|             from warnings import warn | ||||
|             message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) + | ||||
|                        'Please import it from gtsam_unstable.') | ||||
|             warn(message) | ||||
|             return item(*args, **kwargs) | ||||
|         return wrapper | ||||
| 
 | ||||
| 
 | ||||
|     for name in dir(gtsam_unstable): | ||||
|         if not name.startswith('__'): | ||||
|             item = getattr(gtsam_unstable, name) | ||||
|             if callable(item): | ||||
|                 item = _deprecated_wrapper(item, name) | ||||
|             globals()[name] = item | ||||
| 
 | ||||
| except ImportError: | ||||
|     pass | ||||
| 
 | ||||
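|  | ||||
| For context, a self-contained sketch of what the shim above did from a caller's point of view; the callable below is a hypothetical stand-in, not a real gtsam_unstable symbol: | ||||
|  | ||||
| ```python | ||||
| # Illustrative only: reproduce the shim's behavior with a hypothetical stand-in callable. | ||||
| from warnings import warn | ||||
|  | ||||
| def _deprecated_wrapper(item, name): | ||||
|     def wrapper(*args, **kwargs): | ||||
|         warn('importing the unstable item "{}" directly from gtsam is deprecated. ' | ||||
|              'Please import it from gtsam_unstable.'.format(name)) | ||||
|         return item(*args, **kwargs) | ||||
|     return wrapper | ||||
|  | ||||
| def SomeUnstableFactor(x):  # hypothetical stand-in for a gtsam_unstable class | ||||
|     return x | ||||
|  | ||||
| SomeUnstableFactor = _deprecated_wrapper(SomeUnstableFactor, "SomeUnstableFactor") | ||||
| print(SomeUnstableFactor(42))  # prints 42 after emitting the deprecation message | ||||
| ``` | ||||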
|  | @ -1,143 +0,0 @@ | |||
| """ | ||||
| GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, | ||||
| Atlanta, Georgia 30332-0415 | ||||
| All Rights Reserved | ||||
| 
 | ||||
| See LICENSE for the license information | ||||
| 
 | ||||
| A script validating the ImuFactor inference. | ||||
| """ | ||||
| 
 | ||||
| from __future__ import print_function | ||||
| 
 | ||||
| import math | ||||
| 
 | ||||
| import matplotlib.pyplot as plt | ||||
| import numpy as np | ||||
| from mpl_toolkits.mplot3d import Axes3D | ||||
| 
 | ||||
| import gtsam | ||||
| from gtsam.utils.plot import plot_pose3 | ||||
| from PreintegrationExample import POSES_FIG, PreintegrationExample | ||||
| 
 | ||||
| BIAS_KEY = int(gtsam.symbol(ord('b'), 0)) | ||||
| 
 | ||||
| 
 | ||||
| def X(key): | ||||
|     """Create symbol for pose key.""" | ||||
|     return gtsam.symbol(ord('x'), key) | ||||
| 
 | ||||
| 
 | ||||
| def V(key): | ||||
|     """Create symbol for velocity key.""" | ||||
|     return gtsam.symbol(ord('v'), key) | ||||
| 
 | ||||
| 
 | ||||
| np.set_printoptions(precision=3, suppress=True) | ||||
| 
 | ||||
| 
 | ||||
| class ImuFactorExample(PreintegrationExample): | ||||
| 
 | ||||
|     def __init__(self): | ||||
|         self.velocity = np.array([2, 0, 0]) | ||||
|         self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) | ||||
|         self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) | ||||
| 
 | ||||
|         # Choose one of these twists to change scenario: | ||||
|         zero_twist = (np.zeros(3), np.zeros(3)) | ||||
|         forward_twist = (np.zeros(3), self.velocity) | ||||
|         loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity) | ||||
|         sick_twist = ( | ||||
|             np.array([math.radians(30), -math.radians(30), 0]), self.velocity) | ||||
| 
 | ||||
|         accBias = np.array([-0.3, 0.1, 0.2]) | ||||
|         gyroBias = np.array([0.1, 0.3, -0.1]) | ||||
|         bias = gtsam.imuBias_ConstantBias(accBias, gyroBias) | ||||
| 
 | ||||
|         dt = 1e-2 | ||||
|         super(ImuFactorExample, self).__init__(sick_twist, bias, dt) | ||||
| 
 | ||||
|     def addPrior(self, i, graph): | ||||
|         state = self.scenario.navState(i) | ||||
|         graph.push_back(gtsam.PriorFactorPose3( | ||||
|             X(i), state.pose(), self.priorNoise)) | ||||
|         graph.push_back(gtsam.PriorFactorVector( | ||||
|             V(i), state.velocity(), self.velNoise)) | ||||
| 
 | ||||
|     def run(self): | ||||
|         graph = gtsam.NonlinearFactorGraph() | ||||
| 
 | ||||
|         # initialize data structure for pre-integrated IMU measurements | ||||
|         pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) | ||||
| 
 | ||||
|         T = 12 | ||||
|         num_poses = T + 1  # assumes 1 factor per second | ||||
|         initial = gtsam.Values() | ||||
|         initial.insert(BIAS_KEY, self.actualBias) | ||||
|         for i in range(num_poses): | ||||
|             state_i = self.scenario.navState(float(i)) | ||||
|             initial.insert(X(i), state_i.pose()) | ||||
|             initial.insert(V(i), state_i.velocity()) | ||||
| 
 | ||||
|         # simulate the loop | ||||
|         i = 0  # state index | ||||
|         actual_state_i = self.scenario.navState(0) | ||||
|         for k, t in enumerate(np.arange(0, T, self.dt)): | ||||
|             # get measurements and add them to PIM | ||||
|             measuredOmega = self.runner.measuredAngularVelocity(t) | ||||
|             measuredAcc = self.runner.measuredSpecificForce(t) | ||||
|             pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) | ||||
| 
 | ||||
|             # Plot IMU many times | ||||
|             if k % 10 == 0: | ||||
|                 self.plotImu(t, measuredOmega, measuredAcc) | ||||
| 
 | ||||
|             # Plot every second | ||||
|             if k % int(1 / self.dt) == 0: | ||||
|                 self.plotGroundTruthPose(t) | ||||
| 
 | ||||
|             # create IMU factor every second | ||||
|             if (k + 1) % int(1 / self.dt) == 0: | ||||
|                 factor = gtsam.ImuFactor(X(i), V(i), X( | ||||
|                     i + 1), V(i + 1), BIAS_KEY, pim) | ||||
|                 graph.push_back(factor) | ||||
|                 if True: | ||||
|                     print(factor) | ||||
|                     print(pim.predict(actual_state_i, self.actualBias)) | ||||
|                 pim.resetIntegration() | ||||
|                 actual_state_i = self.scenario.navState(t + self.dt) | ||||
|                 i += 1 | ||||
| 
 | ||||
|         # add priors on beginning and end | ||||
|         self.addPrior(0, graph) | ||||
|         self.addPrior(num_poses - 1, graph) | ||||
| 
 | ||||
|         # optimize using Levenberg-Marquardt optimization | ||||
|         params = gtsam.LevenbergMarquardtParams() | ||||
|         params.setVerbosityLM("SUMMARY") | ||||
|         optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) | ||||
|         result = optimizer.optimize() | ||||
| 
 | ||||
|         # Calculate and print marginal covariances | ||||
|         marginals = gtsam.Marginals(graph, result) | ||||
|         print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY)) | ||||
|         for i in range(num_poses): | ||||
|             print("Covariance on pose {}:\n{}\n".format( | ||||
|                 i, marginals.marginalCovariance(X(i)))) | ||||
|             print("Covariance on vel {}:\n{}\n".format( | ||||
|                 i, marginals.marginalCovariance(V(i)))) | ||||
| 
 | ||||
|         # Plot resulting poses | ||||
|         i = 0 | ||||
|         while result.exists(X(i)): | ||||
|             pose_i = result.atPose3(X(i)) | ||||
|             plot_pose3(POSES_FIG, pose_i, 0.1) | ||||
|             i += 1 | ||||
|         print(result.atimuBias_ConstantBias(BIAS_KEY)) | ||||
| 
 | ||||
|         plt.ioff() | ||||
|         plt.show() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     ImuFactorExample().run() | ||||
|  | @ -1,176 +0,0 @@ | |||
| """ | ||||
| iSAM2 example with ImuFactor. | ||||
| Author: Robert Truax (C++), Frank Dellaert (Python) | ||||
| """ | ||||
| # pylint: disable=invalid-name, E1101 | ||||
| 
 | ||||
| from __future__ import print_function | ||||
| 
 | ||||
| import math | ||||
| 
 | ||||
| import matplotlib.pyplot as plt | ||||
| import numpy as np | ||||
| from mpl_toolkits.mplot3d import Axes3D  # pylint: disable=W0611 | ||||
| 
 | ||||
| import gtsam | ||||
| import gtsam.utils.plot as gtsam_plot | ||||
| 
 | ||||
| 
 | ||||
| def X(key): | ||||
|     """Create symbol for pose key.""" | ||||
|     return gtsam.symbol(ord('x'), key) | ||||
| 
 | ||||
| 
 | ||||
| def V(key): | ||||
|     """Create symbol for velocity key.""" | ||||
|     return gtsam.symbol(ord('v'), key) | ||||
| 
 | ||||
| 
 | ||||
| def vector3(x, y, z): | ||||
|     """Create 3d double numpy array.""" | ||||
|     return np.array([x, y, z], dtype=float)  # note: np.float was removed in NumPy 1.20+ | ||||
| 
 | ||||
| 
 | ||||
| def ISAM2_plot(values, fignum=0): | ||||
|     """Plot poses.""" | ||||
|     fig = plt.figure(fignum) | ||||
|     axes = fig.gca(projection='3d') | ||||
|     plt.cla() | ||||
| 
 | ||||
|     i = 0 | ||||
|     min_bounds = 0, 0, 0 | ||||
|     max_bounds = 0, 0, 0 | ||||
|     while values.exists(X(i)): | ||||
|         pose_i = values.atPose3(X(i)) | ||||
|         gtsam_plot.plot_pose3(fignum, pose_i, 10) | ||||
|         position = pose_i.translation().vector() | ||||
|         min_bounds = [min(v1, v2) for (v1, v2) in zip(position, min_bounds)] | ||||
|         max_bounds = [max(v1, v2) for (v1, v2) in zip(position, max_bounds)] | ||||
|         # max_bounds = min(pose_i.x(), max_bounds[0]), 0, 0 | ||||
|         i += 1 | ||||
| 
 | ||||
|     # draw | ||||
|     axes.set_xlim3d(min_bounds[0]-1, max_bounds[0]+1) | ||||
|     axes.set_ylim3d(min_bounds[1]-1, max_bounds[1]+1) | ||||
|     axes.set_zlim3d(min_bounds[2]-1, max_bounds[2]+1) | ||||
|     plt.pause(1) | ||||
| 
 | ||||
| 
 | ||||
| # IMU preintegration parameters | ||||
| # Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis | ||||
| g = 9.81 | ||||
| n_gravity = vector3(0, 0, -g) | ||||
| PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) | ||||
| I = np.eye(3) | ||||
| PARAMS.setAccelerometerCovariance(I * 0.1) | ||||
| PARAMS.setGyroscopeCovariance(I * 0.1) | ||||
| PARAMS.setIntegrationCovariance(I * 0.1) | ||||
| PARAMS.setUse2ndOrderCoriolis(False) | ||||
| PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) | ||||
| 
 | ||||
| BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1) | ||||
| DELTA = gtsam.Pose3(gtsam.Rot3.Rodrigues(0, 0, 0), | ||||
|                     gtsam.Point3(0.05, -0.10, 0.20)) | ||||
| 
 | ||||
| 
 | ||||
| def IMU_example(): | ||||
|     """Run iSAM 2 example with IMU factor.""" | ||||
| 
 | ||||
|     # Start with a camera on x-axis looking at origin | ||||
|     radius = 30 | ||||
|     up = gtsam.Point3(0, 0, 1) | ||||
|     target = gtsam.Point3(0, 0, 0) | ||||
|     position = gtsam.Point3(radius, 0, 0) | ||||
|     camera = gtsam.SimpleCamera.Lookat(position, target, up, gtsam.Cal3_S2()) | ||||
|     pose_0 = camera.pose() | ||||
| 
 | ||||
|     # Create the set of ground-truth landmarks and poses | ||||
|     angular_velocity = math.radians(180)  # rad/sec | ||||
|     delta_t = 1.0/18  # makes for 10 degrees per step | ||||
| 
 | ||||
|     angular_velocity_vector = vector3(0, -angular_velocity, 0) | ||||
|     linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) | ||||
|     scenario = gtsam.ConstantTwistScenario( | ||||
|         angular_velocity_vector, linear_velocity_vector, pose_0) | ||||
| 
 | ||||
|     # Create a factor graph | ||||
|     newgraph = gtsam.NonlinearFactorGraph() | ||||
| 
 | ||||
|     # Create (incremental) ISAM2 solver | ||||
|     isam = gtsam.ISAM2() | ||||
| 
 | ||||
|     # Create the initial estimate to the solution | ||||
|     # Intentionally initialize the variables off from the ground truth | ||||
|     initialEstimate = gtsam.Values() | ||||
| 
 | ||||
|     # Add a prior on pose x0. This indirectly specifies where the origin is. | ||||
|     # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw | ||||
|     noise = gtsam.noiseModel_Diagonal.Sigmas( | ||||
|         np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) | ||||
|     newgraph.push_back(gtsam.PriorFactorPose3(X(0), pose_0, noise)) | ||||
| 
 | ||||
|     # Add imu priors | ||||
|     biasKey = gtsam.symbol(ord('b'), 0) | ||||
|     biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) | ||||
|     biasprior = gtsam.PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(), | ||||
|                                               biasnoise) | ||||
|     newgraph.push_back(biasprior) | ||||
|     initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) | ||||
|     velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) | ||||
| 
 | ||||
|     # Calculate with correct initial velocity | ||||
|     n_velocity = vector3(0, angular_velocity * radius, 0) | ||||
|     velprior = gtsam.PriorFactorVector(V(0), n_velocity, velnoise) | ||||
|     newgraph.push_back(velprior) | ||||
|     initialEstimate.insert(V(0), n_velocity) | ||||
| 
 | ||||
|     accum = gtsam.PreintegratedImuMeasurements(PARAMS) | ||||
| 
 | ||||
|     # Simulate poses and imu measurements, adding them to the factor graph | ||||
|     for i in range(80): | ||||
|         t = i * delta_t  # simulation time | ||||
|         if i == 0:  # First time add two poses | ||||
|             pose_1 = scenario.pose(delta_t) | ||||
|             initialEstimate.insert(X(0), pose_0.compose(DELTA)) | ||||
|             initialEstimate.insert(X(1), pose_1.compose(DELTA)) | ||||
|         elif i >= 2:  # Add more poses as necessary | ||||
|             pose_i = scenario.pose(t) | ||||
|             initialEstimate.insert(X(i), pose_i.compose(DELTA)) | ||||
| 
 | ||||
|         if i > 0: | ||||
|             # Add Bias variables periodically | ||||
|             if i % 5 == 0: | ||||
|                 biasKey += 1 | ||||
|                 factor = gtsam.BetweenFactorConstantBias( | ||||
|                     biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE) | ||||
|                 newgraph.add(factor) | ||||
|                 initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) | ||||
| 
 | ||||
|             # Predict acceleration and gyro measurements in (actual) body frame | ||||
|             nRb = scenario.rotation(t).matrix() | ||||
|             bRn = np.transpose(nRb) | ||||
|             measuredAcc = scenario.acceleration_b(t) - np.dot(bRn, n_gravity) | ||||
|             measuredOmega = scenario.omega_b(t) | ||||
|             accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t) | ||||
| 
 | ||||
|             # Add Imu Factor | ||||
|             imufac = gtsam.ImuFactor( | ||||
|                 X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) | ||||
|             newgraph.add(imufac) | ||||
| 
 | ||||
|             # insert new velocity, which is wrong | ||||
|             initialEstimate.insert(V(i), n_velocity) | ||||
|             accum.resetIntegration() | ||||
| 
 | ||||
|         # Incremental solution | ||||
|         isam.update(newgraph, initialEstimate) | ||||
|         result = isam.calculateEstimate() | ||||
|         ISAM2_plot(result) | ||||
| 
 | ||||
|         # reset | ||||
|         newgraph = gtsam.NonlinearFactorGraph() | ||||
|         initialEstimate.clear() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     IMU_example() | ||||
|  | @ -1,116 +0,0 @@ | |||
| """Various plotting utlities.""" | ||||
| 
 | ||||
| import numpy as np | ||||
| import matplotlib.pyplot as plt | ||||
| from matplotlib import patches | ||||
| 
 | ||||
| 
 | ||||
| def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): | ||||
|     """Plot a 2D pose on given axis 'axes' with given 'axis_length'.""" | ||||
|     # get rotation and translation (center) | ||||
|     gRp = pose.rotation().matrix()  # rotation from pose to global | ||||
|     t = pose.translation() | ||||
|     origin = np.array([t.x(), t.y()]) | ||||
| 
 | ||||
|     # draw the camera axes | ||||
|     x_axis = origin + gRp[:, 0] * axis_length | ||||
|     line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0) | ||||
|     axes.plot(line[:, 0], line[:, 1], 'r-') | ||||
| 
 | ||||
|     y_axis = origin + gRp[:, 1] * axis_length | ||||
|     line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0) | ||||
|     axes.plot(line[:, 0], line[:, 1], 'g-') | ||||
| 
 | ||||
|     if covariance is not None: | ||||
|         pPp = covariance[0:2, 0:2] | ||||
|         gPp = np.matmul(np.matmul(gRp, pPp), gRp.T) | ||||
| 
 | ||||
|         w, v = np.linalg.eig(gPp) | ||||
| 
 | ||||
|         # k = 2.296 | ||||
|         k = 5.0 | ||||
| 
 | ||||
|         angle = np.arctan2(v[1, 0], v[0, 0]) | ||||
|         e1 = patches.Ellipse(origin, np.sqrt(w[0]*k), np.sqrt(w[1]*k), | ||||
|                              np.rad2deg(angle), fill=False) | ||||
|         axes.add_patch(e1) | ||||
| 
 | ||||
| def plot_pose2(fignum, pose, axis_length=0.1, covariance=None): | ||||
|     """Plot a 2D pose on given figure with given 'axis_length'.""" | ||||
|     # get figure object | ||||
|     fig = plt.figure(fignum) | ||||
|     axes = fig.gca() | ||||
|     plot_pose2_on_axes(axes, pose, axis_length, covariance) | ||||
| 
 | ||||
| 
 | ||||
| def plot_point3_on_axes(axes, point, linespec): | ||||
|     """Plot a 3D point on given axis 'axes' with given 'linespec'.""" | ||||
|     axes.plot([point.x()], [point.y()], [point.z()], linespec) | ||||
| 
 | ||||
| 
 | ||||
| def plot_point3(fignum, point, linespec): | ||||
|     """Plot a 3D point on given figure with given 'linespec'.""" | ||||
|     fig = plt.figure(fignum) | ||||
|     axes = fig.gca(projection='3d') | ||||
|     plot_point3_on_axes(axes, point, linespec) | ||||
| 
 | ||||
| 
 | ||||
| def plot_3d_points(fignum, values, linespec, marginals=None): | ||||
|     """ | ||||
|     Plots the Point3s in 'values', with optional covariances. | ||||
|     Finds all the Point3 objects in the given Values object and plots them. | ||||
|     If a Marginals object is given, this function will also plot marginal | ||||
|     covariance ellipses for each point. | ||||
|     """ | ||||
| 
 | ||||
|     keys = values.keys() | ||||
| 
 | ||||
|     # Plot points and covariance matrices | ||||
|     for i in range(keys.size()): | ||||
|         try: | ||||
|             p = values.atPoint3(keys.at(i)) | ||||
|             # if haveMarginals | ||||
|             #     P = marginals.marginalCovariance(key); | ||||
|             #     gtsam.plot_point3(p, linespec, P); | ||||
|             # else | ||||
|             plot_point3(fignum, p, linespec) | ||||
|         except RuntimeError: | ||||
|             # presumably this key is not a Point3; skip it | ||||
|             continue | ||||
| 
 | ||||
| 
 | ||||
| def plot_pose3_on_axes(axes, pose, axis_length=0.1): | ||||
|     """Plot a 3D pose on given axis 'axes' with given 'axis_length'.""" | ||||
|     # get rotation and translation (center) | ||||
|     gRp = pose.rotation().matrix()  # rotation from pose to global | ||||
|     t = pose.translation() | ||||
|     origin = np.array([t.x(), t.y(), t.z()]) | ||||
| 
 | ||||
|     # draw the camera axes | ||||
|     x_axis = origin + gRp[:, 0] * axis_length | ||||
|     line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0) | ||||
|     axes.plot(line[:, 0], line[:, 1], line[:, 2], 'r-') | ||||
| 
 | ||||
|     y_axis = origin + gRp[:, 1] * axis_length | ||||
|     line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0) | ||||
|     axes.plot(line[:, 0], line[:, 1], line[:, 2], 'g-') | ||||
| 
 | ||||
|     z_axis = origin + gRp[:, 2] * axis_length | ||||
|     line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0) | ||||
|     axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-') | ||||
| 
 | ||||
|     # plot the covariance | ||||
|     # TODO (dellaert): make this work | ||||
|     # if (nargin>2) && (~isempty(P)) | ||||
|     #     pPp = P(4:6,4:6); % covariance matrix in pose coordinate frame | ||||
|     #     gPp = gRp*pPp*gRp'; % convert the covariance matrix to global coordinate frame | ||||
|     #     gtsam.covarianceEllipse3D(origin,gPp); | ||||
|     # end | ||||
| 
 | ||||
| 
 | ||||
| def plot_pose3(fignum, pose, axis_length=0.1): | ||||
|     """Plot a 3D pose on given figure with given 'axis_length'.""" | ||||
|     # get figure object | ||||
|     fig = plt.figure(fignum) | ||||
|     axes = fig.gca(projection='3d') | ||||
|     plot_pose3_on_axes(axes, pose, axis_length) | ||||
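|  | ||||
| A minimal usage sketch for the helpers above (assuming gtsam and matplotlib are installed; the pose values are arbitrary): | ||||
|  | ||||
| ```python | ||||
| # Illustrative usage of the plotting helpers above; pose values are arbitrary. | ||||
| import matplotlib.pyplot as plt | ||||
| import gtsam | ||||
| from gtsam.utils.plot import plot_pose2, plot_pose3 | ||||
|  | ||||
| pose2 = gtsam.Pose2(1.0, 2.0, 0.3)   # x, y, theta | ||||
| pose3 = gtsam.Pose3()                # identity pose | ||||
|  | ||||
| plot_pose2(1, pose2, axis_length=0.5) | ||||
| plot_pose3(2, pose3, axis_length=0.5) | ||||
| plt.show() | ||||
| ``` | ||||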
|  | @ -1,49 +0,0 @@ | |||
| include(GtsamCythonWrap) | ||||
| 
 | ||||
| # Copy eigency's sources to the build folder | ||||
| # so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core | ||||
| # and eigency's cython pxd headers can be found when cythonizing gtsam | ||||
| file(COPY "." DESTINATION ".") | ||||
| set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency") | ||||
| set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR}) | ||||
| 
 | ||||
| # This is to make the build/cython/gtsam_eigency folder a python package | ||||
| configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py) | ||||
| 
 | ||||
| # include eigency headers | ||||
| include_directories(${EIGENCY_INCLUDE_DIR}) | ||||
| 
 | ||||
| # Cythonize and build eigency | ||||
| message(STATUS "Cythonize and build eigency") | ||||
| # Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is | ||||
| # a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions() | ||||
| # in conversions_api.h correctly!!! | ||||
| cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions" | ||||
|   "${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "") | ||||
| cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core" | ||||
|   ${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "") | ||||
| 
 | ||||
| # Include Eigen headers: | ||||
| target_include_directories(cythonize_eigency_conversions PUBLIC | ||||
|   $<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}> | ||||
|   $<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}> | ||||
| ) | ||||
| target_include_directories(cythonize_eigency_core PUBLIC | ||||
|   $<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}> | ||||
|   $<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}> | ||||
| ) | ||||
| 
 | ||||
| add_dependencies(cythonize_eigency_core cythonize_eigency_conversions) | ||||
| add_custom_target(cythonize_eigency) | ||||
| add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core) | ||||
| 
 | ||||
| # install | ||||
| install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} | ||||
|         DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}" | ||||
|         PATTERN "CMakeLists.txt" EXCLUDE | ||||
|         PATTERN "__init__.py.in" EXCLUDE) | ||||
| install(TARGETS cythonize_eigency_core cythonize_eigency_conversions | ||||
|         DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency") | ||||
| install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency) | ||||
| configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py) | ||||
| install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency) | ||||
|  | @ -1,20 +0,0 @@ | |||
| Copyright (c) 2016 Wouter Boomsma | ||||
| 
 | ||||
| Permission is hereby granted, free of charge, to any person obtaining | ||||
| a copy of this software and associated documentation files (the | ||||
| "Software"), to deal in the Software without restriction, including | ||||
| without limitation the rights to use, copy, modify, merge, publish, | ||||
| distribute, sublicense, and/or sell copies of the Software, and to | ||||
| permit persons to whom the Software is furnished to do so, subject to | ||||
| the following conditions: | ||||
| 
 | ||||
| The above copyright notice and this permission notice shall be | ||||
| included in all copies or substantial portions of the Software. | ||||
| 
 | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
| EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
| MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
| NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | ||||
| LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||||
| OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||||
| WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||
|  | @ -1,13 +0,0 @@ | |||
| import os | ||||
| import numpy as np | ||||
| 
 | ||||
| __eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}" | ||||
| 
 | ||||
| def get_includes(include_eigen=True): | ||||
|     root = os.path.dirname(__file__) | ||||
|     parent = os.path.join(root, "..") | ||||
|     path = [root, parent, np.get_include()] | ||||
|     if include_eigen: | ||||
|         path.append(os.path.join(root, __eigen_dir__)) | ||||
|     return path | ||||
| 
 | ||||
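|  | ||||
| A usage sketch for get_includes() above, written as a hypothetical setup.py fragment (the extension name and .pyx source are placeholders): | ||||
|  | ||||
| ```python | ||||
| # Hypothetical setup.py fragment; "my_wrapper" and its source file are placeholders. | ||||
| from setuptools import setup, Extension | ||||
| from Cython.Build import cythonize | ||||
| from gtsam_eigency import get_includes | ||||
|  | ||||
| ext = Extension( | ||||
|     "my_wrapper", | ||||
|     sources=["my_wrapper.pyx"], | ||||
|     include_dirs=get_includes(),  # eigency, numpy, and Eigen headers from the module above | ||||
|     language="c++", | ||||
| ) | ||||
|  | ||||
| setup(name="my_wrapper", ext_modules=cythonize([ext])) | ||||
| ``` | ||||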
|  | @ -1,62 +0,0 @@ | |||
| cimport numpy as np | ||||
| 
 | ||||
| cdef api np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
| cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) | ||||
| 
 | ||||
|  | @ -1,327 +0,0 @@ | |||
| cimport cython | ||||
| import numpy as np | ||||
| from numpy.lib.stride_tricks import as_strided | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef double[:,:] mem_view = <double[:rows,:cols]>data | ||||
|     dtype = 'double' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef double[::1,:] mem_view = <double[:rows:1,:cols]>data | ||||
|     dtype = 'double' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef double[:,:] mem_view = <double[:rows,:cols]>data | ||||
|     dtype = 'double' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef double[::1,:] mem_view = <double[:rows:1,:cols]>data | ||||
|     dtype = 'double' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef float[:,:] mem_view = <float[:rows,:cols]>data | ||||
|     dtype = 'float' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef float[::1,:] mem_view = <float[:rows:1,:cols]>data | ||||
|     dtype = 'float' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef float[:,:] mem_view = <float[:rows,:cols]>data | ||||
|     dtype = 'float' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef float[::1,:] mem_view = <float[:rows:1,:cols]>data | ||||
|     dtype = 'float' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef long[:,:] mem_view = <long[:rows,:cols]>data | ||||
|     dtype = 'int_' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef long[::1,:] mem_view = <long[:rows:1,:cols]>data | ||||
|     dtype = 'int_' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef long[:,:] mem_view = <long[:rows,:cols]>data | ||||
|     dtype = 'int_' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef long[::1,:] mem_view = <long[:rows:1,:cols]>data | ||||
|     dtype = 'int_' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned long[:,:] mem_view = <unsigned long[:rows,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned long[::1,:] mem_view = <unsigned long[:rows:1,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned long[:,:] mem_view = <unsigned long[:rows,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned long[::1,:] mem_view = <unsigned long[:rows:1,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef int[:,:] mem_view = <int[:rows,:cols]>data | ||||
|     dtype = 'int' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef int[::1,:] mem_view = <int[:rows:1,:cols]>data | ||||
|     dtype = 'int' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef int[:,:] mem_view = <int[:rows,:cols]>data | ||||
|     dtype = 'int' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef int[::1,:] mem_view = <int[:rows:1,:cols]>data | ||||
|     dtype = 'int' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned int[:,:] mem_view = <unsigned int[:rows,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned int[::1,:] mem_view = <unsigned int[:rows:1,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned int[:,:] mem_view = <unsigned int[:rows,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned int[::1,:] mem_view = <unsigned int[:rows:1,:cols]>data | ||||
|     dtype = 'uint' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef short[:,:] mem_view = <short[:rows,:cols]>data | ||||
|     dtype = 'short' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef short[::1,:] mem_view = <short[:rows:1,:cols]>data | ||||
|     dtype = 'short' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef short[:,:] mem_view = <short[:rows,:cols]>data | ||||
|     dtype = 'short' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef short[::1,:] mem_view = <short[:rows:1,:cols]>data | ||||
|     dtype = 'short' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned short[:,:] mem_view = <unsigned short[:rows,:cols]>data | ||||
|     dtype = 'ushort' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned short[::1,:] mem_view = <unsigned short[:rows:1,:cols]>data | ||||
|     dtype = 'ushort' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned short[:,:] mem_view = <unsigned short[:rows,:cols]>data | ||||
|     dtype = 'ushort' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned short[::1,:] mem_view = <unsigned short[:rows:1,:cols]>data | ||||
|     dtype = 'ushort' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef signed char[:,:] mem_view = <signed char[:rows,:cols]>data | ||||
|     dtype = 'int8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef signed char[::1,:] mem_view = <signed char[:rows:1,:cols]>data | ||||
|     dtype = 'int8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef signed char[:,:] mem_view = <signed char[:rows,:cols]>data | ||||
|     dtype = 'int8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef signed char[::1,:] mem_view = <signed char[:rows:1,:cols]>data | ||||
|     dtype = 'int8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned char[:,:] mem_view = <unsigned char[:rows,:cols]>data | ||||
|     dtype = 'uint8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned char[::1,:] mem_view = <unsigned char[:rows:1,:cols]>data | ||||
|     dtype = 'uint8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned char[:,:] mem_view = <unsigned char[:rows,:cols]>data | ||||
|     dtype = 'uint8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef unsigned char[::1,:] mem_view = <unsigned char[:rows:1,:cols]>data | ||||
|     dtype = 'uint8' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex128_t[:,:] mem_view = <np.complex128_t[:rows,:cols]>data | ||||
|     dtype = 'complex128' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex128_t[::1,:] mem_view = <np.complex128_t[:rows:1,:cols]>data | ||||
|     dtype = 'complex128' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex128_t[:,:] mem_view = <np.complex128_t[:rows,:cols]>data | ||||
|     dtype = 'complex128' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex128_t[::1,:] mem_view = <np.complex128_t[:rows:1,:cols]>data | ||||
|     dtype = 'complex128' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex64_t[:,:] mem_view = <np.complex64_t[:rows,:cols]>data | ||||
|     dtype = 'complex64' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex64_t[::1,:] mem_view = <np.complex64_t[:rows:1,:cols]>data | ||||
|     dtype = 'complex64' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) | ||||
| 
 | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex64_t[:,:] mem_view = <np.complex64_t[:rows,:cols]>data | ||||
|     dtype = 'complex64' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| @cython.boundscheck(False) | ||||
| cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): | ||||
|     cdef np.complex64_t[::1,:] mem_view = <np.complex64_t[:rows:1,:cols]>data | ||||
|     dtype = 'complex64' | ||||
|     cdef int itemsize = np.dtype(dtype).itemsize | ||||
|     return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) | ||||
| 
 | ||||
|  | @ -1,917 +0,0 @@ | |||
| cimport cython | ||||
| cimport numpy as np | ||||
| 
 | ||||
| ctypedef signed char schar; | ||||
| ctypedef unsigned char uchar; | ||||
| 
 | ||||
| ctypedef fused dtype: | ||||
|     uchar | ||||
|     schar | ||||
|     short | ||||
|     int | ||||
|     long | ||||
|     float | ||||
|     double | ||||
| 
 | ||||
| ctypedef fused DenseType: | ||||
|     Matrix | ||||
|     Array | ||||
| 
 | ||||
| ctypedef fused Rows: | ||||
|     _1 | ||||
|     _2 | ||||
|     _3 | ||||
|     _4 | ||||
|     _5 | ||||
|     _6 | ||||
|     _7 | ||||
|     _8 | ||||
|     _9 | ||||
|     _10 | ||||
|     _11 | ||||
|     _12 | ||||
|     _13 | ||||
|     _14 | ||||
|     _15 | ||||
|     _16 | ||||
|     _17 | ||||
|     _18 | ||||
|     _19 | ||||
|     _20 | ||||
|     _21 | ||||
|     _22 | ||||
|     _23 | ||||
|     _24 | ||||
|     _25 | ||||
|     _26 | ||||
|     _27 | ||||
|     _28 | ||||
|     _29 | ||||
|     _30 | ||||
|     _31 | ||||
|     _32 | ||||
|     Dynamic | ||||
| 
 | ||||
| ctypedef Rows Cols | ||||
| ctypedef Rows StrideOuter | ||||
| ctypedef Rows StrideInner | ||||
| 
 | ||||
| ctypedef fused DenseTypeShort: | ||||
|     Vector1i | ||||
|     Vector2i | ||||
|     Vector3i | ||||
|     Vector4i | ||||
|     VectorXi | ||||
|     RowVector1i | ||||
|     RowVector2i | ||||
|     RowVector3i | ||||
|     RowVector4i | ||||
|     RowVectorXi | ||||
|     Matrix1i | ||||
|     Matrix2i | ||||
|     Matrix3i | ||||
|     Matrix4i | ||||
|     MatrixXi | ||||
|     Vector1f | ||||
|     Vector2f | ||||
|     Vector3f | ||||
|     Vector4f | ||||
|     VectorXf | ||||
|     RowVector1f | ||||
|     RowVector2f | ||||
|     RowVector3f | ||||
|     RowVector4f | ||||
|     RowVectorXf | ||||
|     Matrix1f | ||||
|     Matrix2f | ||||
|     Matrix3f | ||||
|     Matrix4f | ||||
|     MatrixXf | ||||
|     Vector1d | ||||
|     Vector2d | ||||
|     Vector3d | ||||
|     Vector4d | ||||
|     VectorXd | ||||
|     RowVector1d | ||||
|     RowVector2d | ||||
|     RowVector3d | ||||
|     RowVector4d | ||||
|     RowVectorXd | ||||
|     Matrix1d | ||||
|     Matrix2d | ||||
|     Matrix3d | ||||
|     Matrix4d | ||||
|     MatrixXd | ||||
|     Vector1cf | ||||
|     Vector2cf | ||||
|     Vector3cf | ||||
|     Vector4cf | ||||
|     VectorXcf | ||||
|     RowVector1cf | ||||
|     RowVector2cf | ||||
|     RowVector3cf | ||||
|     RowVector4cf | ||||
|     RowVectorXcf | ||||
|     Matrix1cf | ||||
|     Matrix2cf | ||||
|     Matrix3cf | ||||
|     Matrix4cf | ||||
|     MatrixXcf | ||||
|     Vector1cd | ||||
|     Vector2cd | ||||
|     Vector3cd | ||||
|     Vector4cd | ||||
|     VectorXcd | ||||
|     RowVector1cd | ||||
|     RowVector2cd | ||||
|     RowVector3cd | ||||
|     RowVector4cd | ||||
|     RowVectorXcd | ||||
|     Matrix1cd | ||||
|     Matrix2cd | ||||
|     Matrix3cd | ||||
|     Matrix4cd | ||||
|     MatrixXcd | ||||
|     Array22i | ||||
|     Array23i | ||||
|     Array24i | ||||
|     Array2Xi | ||||
|     Array32i | ||||
|     Array33i | ||||
|     Array34i | ||||
|     Array3Xi | ||||
|     Array42i | ||||
|     Array43i | ||||
|     Array44i | ||||
|     Array4Xi | ||||
|     ArrayX2i | ||||
|     ArrayX3i | ||||
|     ArrayX4i | ||||
|     ArrayXXi | ||||
|     Array2i | ||||
|     Array3i | ||||
|     Array4i | ||||
|     ArrayXi | ||||
|     Array22f | ||||
|     Array23f | ||||
|     Array24f | ||||
|     Array2Xf | ||||
|     Array32f | ||||
|     Array33f | ||||
|     Array34f | ||||
|     Array3Xf | ||||
|     Array42f | ||||
|     Array43f | ||||
|     Array44f | ||||
|     Array4Xf | ||||
|     ArrayX2f | ||||
|     ArrayX3f | ||||
|     ArrayX4f | ||||
|     ArrayXXf | ||||
|     Array2f | ||||
|     Array3f | ||||
|     Array4f | ||||
|     ArrayXf | ||||
|     Array22d | ||||
|     Array23d | ||||
|     Array24d | ||||
|     Array2Xd | ||||
|     Array32d | ||||
|     Array33d | ||||
|     Array34d | ||||
|     Array3Xd | ||||
|     Array42d | ||||
|     Array43d | ||||
|     Array44d | ||||
|     Array4Xd | ||||
|     ArrayX2d | ||||
|     ArrayX3d | ||||
|     ArrayX4d | ||||
|     ArrayXXd | ||||
|     Array2d | ||||
|     Array3d | ||||
|     Array4d | ||||
|     ArrayXd | ||||
|     Array22cf | ||||
|     Array23cf | ||||
|     Array24cf | ||||
|     Array2Xcf | ||||
|     Array32cf | ||||
|     Array33cf | ||||
|     Array34cf | ||||
|     Array3Xcf | ||||
|     Array42cf | ||||
|     Array43cf | ||||
|     Array44cf | ||||
|     Array4Xcf | ||||
|     ArrayX2cf | ||||
|     ArrayX3cf | ||||
|     ArrayX4cf | ||||
|     ArrayXXcf | ||||
|     Array2cf | ||||
|     Array3cf | ||||
|     Array4cf | ||||
|     ArrayXcf | ||||
|     Array22cd | ||||
|     Array23cd | ||||
|     Array24cd | ||||
|     Array2Xcd | ||||
|     Array32cd | ||||
|     Array33cd | ||||
|     Array34cd | ||||
|     Array3Xcd | ||||
|     Array42cd | ||||
|     Array43cd | ||||
|     Array44cd | ||||
|     Array4Xcd | ||||
|     ArrayX2cd | ||||
|     ArrayX3cd | ||||
|     ArrayX4cd | ||||
|     ArrayXXcd | ||||
|     Array2cd | ||||
|     Array3cd | ||||
|     Array4cd | ||||
|     ArrayXcd | ||||
| 
 | ||||
| ctypedef fused StorageOrder: | ||||
|     RowMajor | ||||
|     ColMajor | ||||
| 
 | ||||
| ctypedef fused MapOptions: | ||||
|     Aligned | ||||
|     Unaligned | ||||
| 
 | ||||
| cdef extern from "eigency_cpp.h" namespace "eigency": | ||||
| 
 | ||||
|      cdef cppclass _1 "1": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _2 "2": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _3 "3": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _4 "4": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _5 "5": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _6 "6": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _7 "7": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _8 "8": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _9 "9": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _10 "10": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _11 "11": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _12 "12": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _13 "13": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _14 "14": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _15 "15": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _16 "16": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _17 "17": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _18 "18": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _19 "19": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _20 "20": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _21 "21": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _22 "22": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _23 "23": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _24 "24": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _25 "25": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _26 "26": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _27 "27": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _28 "28": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _29 "29": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _30 "30": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass _31 "31": | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass _32 "32": | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass PlainObjectBase: | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Map[DenseTypeShort](PlainObjectBase): | ||||
|          Map() except + | ||||
|          Map(np.ndarray array) except + | ||||
| 
 | ||||
|      cdef cppclass FlattenedMap[DenseType, dtype, Rows, Cols]: | ||||
|          FlattenedMap() except + | ||||
|          FlattenedMap(np.ndarray array) except + | ||||
| 
 | ||||
|      cdef cppclass FlattenedMapWithOrder "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder]: | ||||
|          FlattenedMapWithOrder() except + | ||||
|          FlattenedMapWithOrder(np.ndarray array) except + | ||||
| 
 | ||||
|      cdef cppclass FlattenedMapWithStride "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder, MapOptions, StrideOuter, StrideInner]: | ||||
|          FlattenedMapWithStride() except + | ||||
|          FlattenedMapWithStride(np.ndarray array) except + | ||||
| 
 | ||||
|      cdef np.ndarray ndarray_view(PlainObjectBase &) | ||||
|      cdef np.ndarray ndarray_copy(PlainObjectBase &) | ||||
|      cdef np.ndarray ndarray(PlainObjectBase &) | ||||
| 
 | ||||
| 
 | ||||
| cdef extern from "eigency_cpp.h" namespace "Eigen": | ||||
| 
 | ||||
|      cdef cppclass Dynamic: | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowMajor: | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass ColMajor: | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Aligned: | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Unaligned: | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass VectorXd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Vector1i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector2i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector3i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector4i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass VectorXi(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector1i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector2i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector3i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector4i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVectorXi(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix1i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix2i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix3i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix4i(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass MatrixXi(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector1f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector2f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector3f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector4f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass VectorXf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector1f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector2f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector3f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector4f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVectorXf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix1f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix2f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix3f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix4f(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass MatrixXf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector1d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector2d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector3d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector4d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass VectorXd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector1d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector2d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector3d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector4d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVectorXd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix1d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix2d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix3d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix4d(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass MatrixXd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector1cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector2cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector3cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector4cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass VectorXcf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector1cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector2cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector3cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector4cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVectorXcf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix1cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix2cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix3cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix4cf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass MatrixXcf(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector1cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector2cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector3cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Vector4cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass VectorXcd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector1cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector2cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector3cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVector4cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass RowVectorXcd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix1cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix2cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix3cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Matrix4cd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass MatrixXcd(PlainObjectBase): | ||||
|           pass | ||||
| 
 | ||||
|      cdef cppclass Array22i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array23i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array24i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2Xi(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array32i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array33i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array34i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3Xi(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array42i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array43i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array44i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4Xi(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX2i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX3i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX4i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXXi(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4i(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXi(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array22f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array23f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array24f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2Xf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array32f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array33f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array34f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3Xf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array42f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array43f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array44f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4Xf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX2f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX3f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX4f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXXf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4f(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array22d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array23d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array24d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2Xd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array32d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array33d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array34d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3Xd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array42d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array43d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array44d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4Xd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX2d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX3d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX4d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXXd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4d(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array22cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array23cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array24cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2Xcf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array32cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array33cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array34cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3Xcf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array42cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array43cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array44cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4Xcf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX2cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX3cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX4cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXXcf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4cf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXcf(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array22cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array23cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array24cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2Xcd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array32cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array33cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array34cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3Xcd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array42cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array43cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array44cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4Xcd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX2cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX3cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayX4cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXXcd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array2cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array3cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass Array4cd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
|      cdef cppclass ArrayXcd(PlainObjectBase): | ||||
|           pass | ||||
|            | ||||
| 
 | ||||
|  | @ -1 +0,0 @@ | |||
| 
 | ||||
|  | @ -1,504 +0,0 @@ | |||
| #include <Eigen/Dense> | ||||
| 
 | ||||
| #include <iostream> | ||||
| #include <stdexcept> | ||||
| #include <complex> | ||||
| 
 | ||||
| typedef ::std::complex< double > __pyx_t_double_complex; | ||||
| typedef ::std::complex< float > __pyx_t_float_complex; | ||||
| 
 | ||||
| #include "conversions_api.h" | ||||
| 
 | ||||
| #ifndef EIGENCY_CPP | ||||
| #define EIGENCY_CPP | ||||
| 
 | ||||
| namespace eigency { | ||||
| 
 | ||||
| template<typename Scalar> | ||||
| inline PyArrayObject *_ndarray_view(Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0); | ||||
| template<typename Scalar> | ||||
| inline PyArrayObject *_ndarray_copy(const Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0); | ||||
| 
 | ||||
| // Strides:
 | ||||
| // Eigen and numpy differ in their way of dealing with strides. Eigen has the concept of outer and
 | ||||
| // inner strides, which depend on whether the array/matrix is row-major or column-major:
 | ||||
| //     Inner stride: denotes the offset between succeeding elements in each row (row-major) or column (column-major).
 | ||||
| //     Outer stride: denotes the offset between succeeding rows (row-major) or succeeding columns (column-major).
 | ||||
| // In contrast, numpy's strides simply give the offset (in bytes) between consecutive elements along each dimension.
 | ||||
| // Consequently, a switch in numpy storage order from row-major to column-major involves a switch
 | ||||
| // in strides, while it does not affect the stride in Eigen.
 | ||||
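As a hypothetical illustration (not part of this header), the correspondence can be sketched with a short numpy snippet; the 2x3 double-precision array and the variable names below are made up for the example, and the snippet assumes only that numpy is available:

    import numpy as np

    a_c = np.arange(6, dtype=np.float64).reshape(2, 3)  # row-major (C order)
    a_f = np.asfortranarray(a_c)                        # column-major (F order)

    # numpy strides are byte offsets per axis and change with the storage order:
    print(a_c.strides)  # (24, 8): one row ahead skips 3 elements, one column ahead skips 1
    print(a_f.strides)  # (8, 16): one row ahead skips 1 element, one column ahead skips 2

    # Seen from Eigen, the row-major matrix has outer (row) stride 3 and inner
    # stride 1, while the column-major one has outer (column) stride 2 and inner
    # stride 1. The _ndarray_view/_ndarray_copy specializations below pass these
    # element strides to the Cython conversion functions, which multiply them by
    # the item size before handing them to numpy's as_strided.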
| template<> | ||||
| inline PyArrayObject *_ndarray_view<double>(double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) { | ||||
|         // Eigen row-major mode: row_stride=outer_stride, and col_stride=inner_stride
 | ||||
|         // If no stride is given, the row_stride is set to the number of columns.
 | ||||
|         return ndarray_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     } else { | ||||
|         // Eigen column-major mode: row_stride=inner_stride, and col_stride=outer_stride
 | ||||
|         // If no stride is given, the col_stride is set to the number of rows.
 | ||||
|         return ndarray_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
|     } | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<double>(const double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<float>(float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<float>(const float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<long>(long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<long>(const long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<unsigned long>(unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<unsigned long>(const unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<int>(int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<int>(const int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<unsigned int>(unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<unsigned int>(const unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<short>(short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<short>(const short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<unsigned short>(unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<unsigned short>(const unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<signed char>(signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<signed char>(const signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<unsigned char>(unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<unsigned char>(const unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<std::complex<double> >(std::complex<double> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<std::complex<double> >(const std::complex<double> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_view<std::complex<float> >(std::complex<float> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| template<> | ||||
| inline PyArrayObject *_ndarray_copy<std::complex<float> >(const std::complex<float> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { | ||||
|     if (is_row_major) | ||||
|         return ndarray_copy_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); | ||||
|     else | ||||
|         return ndarray_copy_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| template <typename Derived> | ||||
| inline PyArrayObject *ndarray(Eigen::PlainObjectBase<Derived> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); | ||||
| } | ||||
| // If C++11 is available, check if m is an r-value reference, in
 | ||||
| // which case a copy should always be made
 | ||||
| #if __cplusplus >= 201103L | ||||
| template <typename Derived> | ||||
| inline PyArrayObject *ndarray(Eigen::PlainObjectBase<Derived> &&m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); | ||||
| } | ||||
| #endif | ||||
| template <typename Derived> | ||||
| inline PyArrayObject *ndarray(const Eigen::PlainObjectBase<Derived> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); | ||||
| } | ||||
| template <typename Derived> | ||||
| inline PyArrayObject *ndarray_view(Eigen::PlainObjectBase<Derived> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); | ||||
| } | ||||
| template <typename Derived> | ||||
| inline PyArrayObject *ndarray_view(const Eigen::PlainObjectBase<Derived> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor); | ||||
| } | ||||
| template <typename Derived> | ||||
| inline PyArrayObject *ndarray_copy(const Eigen::PlainObjectBase<Derived> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); | ||||
| } | ||||
| 
 | ||||
| template <typename Derived, int MapOptions, typename Stride> | ||||
| inline PyArrayObject *ndarray(Eigen::Map<Derived, MapOptions, Stride> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); | ||||
| } | ||||
| template <typename Derived, int MapOptions, typename Stride> | ||||
| inline PyArrayObject *ndarray(const Eigen::Map<Derived, MapOptions, Stride> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     // Since this is a map, we assume that ownership is correctly taken care
 | ||||
|     // of, and we avoid taking a copy
 | ||||
|     return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); | ||||
| } | ||||
| template <typename Derived, int MapOptions, typename Stride> | ||||
| inline PyArrayObject *ndarray_view(Eigen::Map<Derived, MapOptions, Stride> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); | ||||
| } | ||||
| template <typename Derived, int MapOptions, typename Stride> | ||||
| inline PyArrayObject *ndarray_view(const Eigen::Map<Derived, MapOptions, Stride> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); | ||||
| } | ||||
| template <typename Derived, int MapOptions, typename Stride> | ||||
| inline PyArrayObject *ndarray_copy(const Eigen::Map<Derived, MapOptions, Stride> &m) { | ||||
|     import_gtsam_eigency__conversions(); | ||||
|     return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| template <typename MatrixType, | ||||
|           int _MapOptions = Eigen::Unaligned, | ||||
|           typename _StrideType=Eigen::Stride<0,0> > | ||||
| class MapBase: public Eigen::Map<MatrixType, _MapOptions, _StrideType> { | ||||
| public: | ||||
|     typedef Eigen::Map<MatrixType, _MapOptions, _StrideType> Base; | ||||
|     typedef typename Base::Scalar Scalar; | ||||
| 
 | ||||
|     MapBase(Scalar* data, | ||||
|             long rows, | ||||
|             long cols, | ||||
|             _StrideType stride=_StrideType()) | ||||
|         : Base(data, | ||||
|                // If both dimensions are dynamic or dimensions match, accept dimensions as they are
 | ||||
|                ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || | ||||
|                 (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) | ||||
|                ? rows | ||||
|                // otherwise, test if swapping them makes them fit
 | ||||
|                : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) | ||||
|                   ? cols | ||||
|                   : rows), | ||||
|                ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || | ||||
|                 (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) | ||||
|                ? cols | ||||
|                : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) | ||||
|                   ? rows | ||||
|                   : cols), | ||||
|                stride | ||||
|             )  {} | ||||
| 
 | ||||
|     MapBase &operator=(const MatrixType &other) { | ||||
|         Base::operator=(other); | ||||
|         return *this; | ||||
|     } | ||||
| 
 | ||||
|     virtual ~MapBase() { } | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| template <template<class,int,int,int,int,int> class EigencyDenseBase, | ||||
|           typename Scalar, | ||||
|           int _Rows, int _Cols, | ||||
|           int _Options = Eigen::AutoAlign | | ||||
| #if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4 | ||||
|     // workaround a bug in at least gcc 3.4.6
 | ||||
|     // the innermost ?: ternary operator is misparsed. We write it slightly
 | ||||
|     // differently and this makes gcc 3.4.6 happy, but it's ugly.
 | ||||
|     // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
 | ||||
|     // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
 | ||||
|                           ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor | ||||
| // EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
 | ||||
| #if EIGEN_VERSION_AT_LEAST(3,2,90) | ||||
|                           : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION | ||||
| #else | ||||
|                           : !(_Cols==1 && _Rows!=1) ? Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION | ||||
| #endif | ||||
|                           : ColMajor ), | ||||
| #else | ||||
|                           ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor | ||||
|                           : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor | ||||
| // EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
 | ||||
| #if EIGEN_VERSION_AT_LEAST(3,2,90) | ||||
|                           : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), | ||||
| #else | ||||
|                           : Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), | ||||
| #endif | ||||
| #endif | ||||
|           int _MapOptions = Eigen::Unaligned, | ||||
|           int _StrideOuter=0, int _StrideInner=0, | ||||
|           int _MaxRows = _Rows, | ||||
|           int _MaxCols = _Cols> | ||||
| class FlattenedMap: public MapBase<EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> >  { | ||||
| public: | ||||
|     typedef MapBase<EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > Base; | ||||
| 
 | ||||
|     FlattenedMap() | ||||
|         : Base(NULL, 0, 0), | ||||
|           object_(NULL) {} | ||||
| 
 | ||||
|     FlattenedMap(Scalar *data, long rows, long cols, long outer_stride=0, long inner_stride=0) | ||||
|         : Base(data, rows, cols, | ||||
|                Eigen::Stride<_StrideOuter, _StrideInner>(outer_stride, inner_stride)), | ||||
|           object_(NULL) { | ||||
|     } | ||||
| 
 | ||||
|     FlattenedMap(PyArrayObject *object) | ||||
|         : Base((Scalar *)((PyArrayObject*)object)->data, | ||||
|         // : Base(_from_numpy<Scalar>((PyArrayObject*)object),
 | ||||
|                (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1, | ||||
|                (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0], | ||||
|                Eigen::Stride<_StrideOuter, _StrideInner>(_StrideOuter != Eigen::Dynamic ? _StrideOuter : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1, | ||||
|                                                          _StrideInner != Eigen::Dynamic ? _StrideInner : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0])), | ||||
|           object_(object) { | ||||
| 
 | ||||
|         if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) | ||||
|             throw std::invalid_argument("Numpy array must be in one contiguous segment to be transferred to an Eigen Map."); | ||||
| 
 | ||||
|         Py_XINCREF(object_); | ||||
|     } | ||||
|     FlattenedMap &operator=(const FlattenedMap &other) { | ||||
|         if (other.object_) { | ||||
|             new (this) FlattenedMap(other.object_); | ||||
|         } else { | ||||
|             // Replace the memory that we point to (not a memory allocation)
 | ||||
|             new (this) FlattenedMap(const_cast<Scalar*>(other.data()), | ||||
|                                     other.rows(), | ||||
|                                     other.cols(), | ||||
|                                     other.outerStride(), | ||||
|                                     other.innerStride()); | ||||
|         } | ||||
| 
 | ||||
|         return *this; | ||||
|     } | ||||
| 
 | ||||
|     operator Base() const { | ||||
|         return static_cast<Base>(*this); | ||||
|     } | ||||
| 
 | ||||
|     operator Base&() const { | ||||
|         return static_cast<Base&>(*this); | ||||
|     } | ||||
| 
 | ||||
|     operator EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>() const { | ||||
|         return EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>(static_cast<Base>(*this)); | ||||
|     } | ||||
| 
 | ||||
|     virtual ~FlattenedMap() { | ||||
|         Py_XDECREF(object_); | ||||
|     } | ||||
| 
 | ||||
| private: | ||||
|     PyArrayObject * const object_; | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| template <typename MatrixType> | ||||
| class Map: public MapBase<MatrixType> { | ||||
| public: | ||||
|     typedef MapBase<MatrixType> Base; | ||||
|     typedef typename MatrixType::Scalar Scalar; | ||||
| 
 | ||||
|     enum { | ||||
|         RowsAtCompileTime = Base::Base::RowsAtCompileTime, | ||||
|         ColsAtCompileTime = Base::Base::ColsAtCompileTime | ||||
|     }; | ||||
| 
 | ||||
|     Map() | ||||
|         : Base(NULL, | ||||
|                (RowsAtCompileTime == Eigen::Dynamic) ? 0 : RowsAtCompileTime, | ||||
|                (ColsAtCompileTime == Eigen::Dynamic) ? 0 : ColsAtCompileTime), | ||||
|           object_(NULL) { | ||||
|     } | ||||
| 
 | ||||
|     Map(Scalar *data, long rows, long cols) | ||||
|         : Base(data, rows, cols), | ||||
|           object_(NULL) {} | ||||
| 
 | ||||
|     Map(PyArrayObject *object) | ||||
|         : Base((PyObject*)object == Py_None? NULL: (Scalar *)object->data, | ||||
|                // ROW: If array is in row-major order, transpose (see README)
 | ||||
|                (PyObject*)object == Py_None? 0 : | ||||
|                (!PyArray_IS_F_CONTIGUOUS(object) | ||||
|                 ? ((object->nd == 1) | ||||
|                    ? 1  // ROW: If 1D row-major numpy array, set to 1 (row vector)
 | ||||
|                    : object->dimensions[1]) | ||||
|                 : object->dimensions[0]), | ||||
|                // COLUMN: If array is in row-major order: transpose (see README)
 | ||||
|                (PyObject*)object == Py_None? 0 : | ||||
|                (!PyArray_IS_F_CONTIGUOUS(object) | ||||
|                 ? object->dimensions[0] | ||||
|                 : ((object->nd == 1) | ||||
|                    ? 1  // COLUMN: If 1D col-major numpy array, treat as a column vector (1 column)
 | ||||
|                    : object->dimensions[1]))), | ||||
|           object_(object) { | ||||
| 
 | ||||
|         if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) | ||||
|             throw std::invalid_argument("Numpy array must be in one contiguous segment to be transferred to an Eigen Map."); | ||||
|         Py_XINCREF(object_); | ||||
|     } | ||||
| 
 | ||||
|     Map &operator=(const Map &other) { | ||||
|         if (other.object_) { | ||||
|             new (this) Map(other.object_); | ||||
|         } else { | ||||
|             // Replace the memory that we point to (not a memory allocation)
 | ||||
|             new (this) Map(const_cast<Scalar*>(other.data()), | ||||
|                           other.rows(), | ||||
|                           other.cols()); | ||||
|         } | ||||
| 
 | ||||
|         return *this; | ||||
|     } | ||||
| 
 | ||||
|     Map &operator=(const MatrixType &other) { | ||||
|         MapBase<MatrixType>::operator=(other); | ||||
|         return *this; | ||||
|     } | ||||
| 
 | ||||
|     operator Base() const { | ||||
|         return static_cast<Base>(*this); | ||||
|     } | ||||
| 
 | ||||
|     operator Base&() const { | ||||
|         return static_cast<Base&>(*this); | ||||
|     } | ||||
| 
 | ||||
|     operator MatrixType() const { | ||||
|         return MatrixType(static_cast<Base>(*this)); | ||||
|     } | ||||
| 
 | ||||
|     virtual ~Map() { | ||||
|         Py_XDECREF(object_); | ||||
|     } | ||||
| 
 | ||||
| private: | ||||
|     PyArrayObject * const object_; | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|  | @ -1,3 +0,0 @@ | |||
| Cython>=0.25.2 | ||||
| backports_abc>=0.5 | ||||
| numpy>=1.12.0 | ||||
|  | @ -1,12 +0,0 @@ | |||
| # How to build a GTSAM debian package | ||||
| 
 | ||||
| To use the ``debuild`` command, install the ``devscripts`` package: | ||||
| 
 | ||||
|     sudo apt install devscripts | ||||
| 
 | ||||
| Change into the gtsam directory, then run: | ||||
| 
 | ||||
|     debuild -us -uc -j4 | ||||
| 
 | ||||
| Adjust the ``-j4`` depending on how many CPUs you want to build on in | ||||
| parallel.  | ||||
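| For example, ``debuild -us -uc -j$(nproc)`` uses one build job per available core (``nproc`` is part of GNU coreutils). | ||||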
|  | @ -1,5 +0,0 @@ | |||
| gtsam (4.0.0-1berndpfrommer) bionic; urgency=medium | ||||
| 
 | ||||
|   * initial release | ||||
| 
 | ||||
|  -- Bernd Pfrommer <bernd.pfrommer@gmail.com>  Wed, 18 Jul 2018 20:36:44 -0400 | ||||
|  | @ -1 +0,0 @@ | |||
| 9 | ||||
|  | @ -1,15 +0,0 @@ | |||
| Source: gtsam | ||||
| Section: libs | ||||
| Priority: optional | ||||
| Maintainer: Frank Dellaert <frank@cc.gatech.edu> | ||||
| Uploaders: Jose Luis Blanco Claraco <joseluisblancoc@gmail.com>, Bernd Pfrommer <bernd.pfrommer@gmail.com> | ||||
| Build-Depends: cmake, libboost-all-dev (>= 1.58), libeigen3-dev, libtbb-dev, debhelper (>=9) | ||||
| Standards-Version: 3.9.7 | ||||
| Homepage: https://github.com/borglab/gtsam | ||||
| Vcs-Browser: https://github.com/borglab/gtsam | ||||
| 
 | ||||
| Package: libgtsam-dev | ||||
| Architecture: any | ||||
| Depends: ${shlibs:Depends}, ${misc:Depends} | ||||
| Description: Georgia Tech Smoothing and Mapping Library | ||||
|  gtsam: Georgia Tech Smoothing and Mapping library for SLAM type applications | ||||
|  | @ -1,15 +0,0 @@ | |||
| Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ | ||||
| Upstream-Name: gtsam | ||||
| Source: https://bitbucket.org/gtborg/gtsam.git | ||||
| 
 | ||||
| Files: * | ||||
| Copyright: 2017, Frank Dellaert | ||||
| License: BSD | ||||
| 
 | ||||
| Files: gtsam/3rdparty/CCOLAMD/* | ||||
| Copyright: 2005-2011, Univ. of Florida.  Authors: Timothy A. Davis, Sivasankaran Rajamanickam, and Stefan Larimore.  Closely based on COLAMD by Davis, Stefan Larimore, in collaboration with Esmond Ng, and John Gilbert. http://www.cise.ufl.edu/research/sparse | ||||
| License: GNU LESSER GENERAL PUBLIC LICENSE | ||||
| 
 | ||||
| Files: gtsam/3rdparty/Eigen/* | ||||
| Copyright: 2017, Multiple Authors | ||||
| License: MPL2 | ||||
|  | @ -1,25 +0,0 @@ | |||
| #!/usr/bin/make -f | ||||
| # See debhelper(7) (uncomment to enable) | ||||
| # output every command that modifies files on the build system. | ||||
| export DH_VERBOSE = 1 | ||||
| 
 | ||||
| 
 | ||||
| # see FEATURE AREAS in dpkg-buildflags(1) | ||||
| #export DEB_BUILD_MAINT_OPTIONS = hardening=+all | ||||
| 
 | ||||
| # see ENVIRONMENT in dpkg-buildflags(1) | ||||
| # package maintainers to append CFLAGS | ||||
| #export DEB_CFLAGS_MAINT_APPEND  = -Wall -pedantic | ||||
| # package maintainers to append LDFLAGS | ||||
| #export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed | ||||
| 
 | ||||
| 
 | ||||
| %: | ||||
| 	dh $@ --parallel | ||||
| 
 | ||||
| 
 | ||||
| # dh_make generated override targets | ||||
| # This is example for Cmake (See https://bugs.debian.org/641051 ) | ||||
| override_dh_auto_configure: | ||||
| 	dh_auto_configure -- -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_WRAP=OFF -DGTSAM_BUILD_DOCS=OFF -DGTSAM_INSTALL_CPPUNITLITE=OFF -DGTSAM_INSTALL_GEOGRAPHICLIB=OFF -DGTSAM_BUILD_TYPE_POSTFIXES=OFF | ||||
| 
 | ||||
|  | @ -1 +0,0 @@ | |||
| 3.0 (quilt) | ||||
|  | @ -5,7 +5,7 @@ NonlinearFactorGraph graph; | |||
| Pose2 priorMean(0.0, 0.0, 0.0); | ||||
| noiseModel::Diagonal::shared_ptr priorNoise = | ||||
|   noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1)); | ||||
| graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise)); | ||||
| graph.addPrior(1, priorMean, priorNoise); | ||||
| 
 | ||||
| // Add two odometry factors
 | ||||
| Pose2 odometry(2.0, 0.0, 0.0); | ||||
|  |  | |||
|  | @ -1,7 +1,7 @@ | |||
| NonlinearFactorGraph graph; | ||||
| noiseModel::Diagonal::shared_ptr priorNoise = | ||||
|   noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1)); | ||||
| graph.add(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise)); | ||||
| graph.addPrior(1, Pose2(0, 0, 0), priorNoise); | ||||
| 
 | ||||
| // Add odometry factors
 | ||||
| noiseModel::Diagonal::shared_ptr model = | ||||
|  |  | |||
|  | @ -1188,7 +1188,7 @@ USE_MATHJAX            = YES | |||
| # MathJax, but it is strongly recommended to install a local copy of MathJax  | ||||
| # before deployment. | ||||
| 
 | ||||
| MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest | ||||
| MATHJAX_RELPATH        = https://cdn.mathjax.org/mathjax/latest | ||||
| 
 | ||||
| # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension  | ||||
| # names that should be enabled during MathJax rendering. | ||||
|  |  | |||
|  | @ -2291,15 +2291,11 @@ uncalibration | |||
|  used in the residual). | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Standard | ||||
| \begin_inset Note Note | ||||
| status collapsed | ||||
| 
 | ||||
| \begin_layout Section | ||||
| Noise models of prior factors | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Plain Layout | ||||
| \begin_layout Standard | ||||
| The simplest way to describe noise models is by an example. | ||||
|  Let's take a prior factor on a 3D pose  | ||||
| \begin_inset Formula $x\in\SE 3$ | ||||
|  | @ -2353,7 +2349,7 @@ e\left(x\right)=\norm{h\left(x\right)}_{\Sigma}^{2}=h\left(x\right)^{\t}\Sigma^{ | |||
|  useful answer out quickly ] | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Plain Layout | ||||
| \begin_layout Standard | ||||
| The density induced by a noise model on the prior factor is Gaussian in | ||||
|  the tangent space about the linearization point. | ||||
|  Suppose that the pose is linearized at  | ||||
|  | @ -2431,7 +2427,7 @@ Here we see that the update | |||
| . | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Plain Layout | ||||
| \begin_layout Standard | ||||
| This means that to draw random pose samples, we actually draw random samples | ||||
|  of  | ||||
| \begin_inset Formula $\delta x$ | ||||
|  | @ -2456,7 +2452,7 @@ This means that to draw random pose samples, we actually draw random samples | |||
| Noise models of between factors | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Plain Layout | ||||
| \begin_layout Standard | ||||
| The noise model of a BetweenFactor is a bit more complicated. | ||||
|  The unwhitened error is | ||||
| \begin_inset Formula  | ||||
|  | @ -2516,11 +2512,6 @@ e\left(\delta x_{1}\right) & \approx\norm{\log\left(z^{-1}\left(x_{1}\exp\delta | |||
| \end_inset | ||||
| 
 | ||||
| 
 | ||||
| \end_layout | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| 
 | ||||
| \end_layout | ||||
| 
 | ||||
| \end_body | ||||
|  |  |||
Binary file not shown.

189	doc/math.lyx
							|  | @ -1,7 +1,9 @@ | |||
| #LyX 2.1 created this file. For more info see http://www.lyx.org/ | ||||
| \lyxformat 474 | ||||
| #LyX 2.3 created this file. For more info see http://www.lyx.org/ | ||||
| \lyxformat 544 | ||||
| \begin_document | ||||
| \begin_header | ||||
| \save_transient_properties true | ||||
| \origin unavailable | ||||
| \textclass article | ||||
| \use_default_options false | ||||
| \begin_modules | ||||
|  | @ -14,16 +16,18 @@ theorems-ams-bytype | |||
| \language_package default | ||||
| \inputencoding auto | ||||
| \fontencoding global | ||||
| \font_roman times | ||||
| \font_sans default | ||||
| \font_typewriter default | ||||
| \font_math auto | ||||
| \font_roman "times" "default" | ||||
| \font_sans "default" "default" | ||||
| \font_typewriter "default" "default" | ||||
| \font_math "auto" "auto" | ||||
| \font_default_family rmdefault | ||||
| \use_non_tex_fonts false | ||||
| \font_sc false | ||||
| \font_osf false | ||||
| \font_sf_scale 100 | ||||
| \font_tt_scale 100 | ||||
| \font_sf_scale 100 100 | ||||
| \font_tt_scale 100 100 | ||||
| \use_microtype false | ||||
| \use_dash_ligatures true | ||||
| \graphics default | ||||
| \default_output_format default | ||||
| \output_sync 0 | ||||
|  | @ -53,6 +57,7 @@ theorems-ams-bytype | |||
| \suppress_date false | ||||
| \justification true | ||||
| \use_refstyle 0 | ||||
| \use_minted 0 | ||||
| \index Index | ||||
| \shortcut idx | ||||
| \color #008000 | ||||
|  | @ -65,7 +70,10 @@ theorems-ams-bytype | |||
| \tocdepth 3 | ||||
| \paragraph_separation indent | ||||
| \paragraph_indentation default | ||||
| \quotes_language english | ||||
| \is_math_indent 0 | ||||
| \math_numbering_side default | ||||
| \quotes_style english | ||||
| \dynamic_quotes 0 | ||||
| \papercolumns 1 | ||||
| \papersides 1 | ||||
| \paperpagestyle default | ||||
|  | @ -98,6 +106,11 @@ width "100col%" | |||
| special "none" | ||||
| height "1in" | ||||
| height_special "totalheight" | ||||
| thickness "0.4pt" | ||||
| separation "3pt" | ||||
| shadowsize "4pt" | ||||
| framecolor "black" | ||||
| backgroundcolor "none" | ||||
| status collapsed | ||||
| 
 | ||||
| \begin_layout Plain Layout | ||||
|  | @ -654,6 +667,7 @@ reference "eq:LocalBehavior" | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Spivak65book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -934,6 +948,7 @@ See | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Spivak65book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -1025,6 +1040,7 @@ See | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Spivak65book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -2209,6 +2225,7 @@ instantaneous velocity | |||
| LatexCommand cite | ||||
| after "page 51 for rotations, page 419 for SE(3)" | ||||
| key "Murray94book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -2965,7 +2982,7 @@ B^{T} & I_{3}\end{array}\right] | |||
| \begin_layout Subsection | ||||
| \begin_inset CommandInset label | ||||
| LatexCommand label | ||||
| name "sub:Pushforward-of-Between" | ||||
| name "subsec:Pushforward-of-Between" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -3419,6 +3436,7 @@ A retraction | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Absil07book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -3873,7 +3891,7 @@ BetweenFactor | |||
| , derived in Section  | ||||
| \begin_inset CommandInset ref | ||||
| LatexCommand ref | ||||
| reference "sub:Pushforward-of-Between" | ||||
| reference "subsec:Pushforward-of-Between" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -4430,7 +4448,7 @@ In the case of | |||
| \begin_inset Formula $\SOthree$ | ||||
| \end_inset | ||||
| 
 | ||||
|  the vector space is   | ||||
|  the vector space is  | ||||
| \begin_inset Formula $\Rthree$ | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -4502,7 +4520,7 @@ reference "Th:InverseAction" | |||
| \begin_layout Subsection | ||||
| \begin_inset CommandInset label | ||||
| LatexCommand label | ||||
| name "sub:3DAngularVelocities" | ||||
| name "subsec:3DAngularVelocities" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -4695,6 +4713,7 @@ Absil | |||
| LatexCommand cite | ||||
| after "page 58" | ||||
| key "Absil07book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -5395,6 +5414,7 @@ While not a Lie group, we can define an exponential map, which is given | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Ma01ijcv" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -5402,6 +5422,7 @@ key "Ma01ijcv" | |||
| \begin_inset CommandInset href | ||||
| LatexCommand href | ||||
| name "http://stat.fsu.edu/~anuj/CVPR_Tutorial/Part2.pdf" | ||||
| literal "false" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -5605,6 +5626,7 @@ The exponential map uses | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Absil07book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -6293,7 +6315,7 @@ d^{c}=R_{w}^{c}\left(d^{w}+(t^{w}v^{w})v^{w}-t^{w}\right) | |||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Section | ||||
| Line3 (Ocaml) | ||||
| Line3 | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Standard | ||||
|  | @ -6345,6 +6367,14 @@ R'=R(I+\Omega) | |||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| 
 | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Subsection | ||||
| Projecting Line3 | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Standard | ||||
| Projecting a line to 2D can be done easily, as both  | ||||
| \begin_inset Formula $v$ | ||||
| \end_inset | ||||
|  | @ -6430,13 +6460,21 @@ or the | |||
| 
 | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Subsection | ||||
| Action of  | ||||
| \begin_inset Formula $\SEthree$ | ||||
| \end_inset | ||||
| 
 | ||||
|  on the line | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Standard | ||||
| Transforming a 3D line  | ||||
| \begin_inset Formula $(R,(a,b))$ | ||||
| \end_inset | ||||
| 
 | ||||
|  from a world coordinate frame to a camera frame  | ||||
| \begin_inset Formula $(R_{w}^{c},t^{w})$ | ||||
| \begin_inset Formula $T_{c}^{w}=(R_{c}^{w},t^{w})$ | ||||
| \end_inset | ||||
| 
 | ||||
|  is done by | ||||
|  | @ -6466,17 +6504,115 @@ b'=b-R_{2}^{T}t^{w} | |||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| Again, we need to redo the derivatives, as R is incremented from the right. | ||||
|  The first argument is incremented from the left, but the result is incremented | ||||
|  on the right: | ||||
| where  | ||||
| \begin_inset Formula $R_{1}$ | ||||
| \end_inset | ||||
| 
 | ||||
|  and  | ||||
| \begin_inset Formula $R_{2}$ | ||||
| \end_inset | ||||
| 
 | ||||
|  are the columns of  | ||||
| \begin_inset Formula $R$ | ||||
| \end_inset | ||||
| 
 | ||||
|  , as before. | ||||
|   | ||||
| \end_layout | ||||
| 
 | ||||
| \begin_layout Standard | ||||
| To find the derivatives, the transformation of a line  | ||||
| \begin_inset Formula $l^{w}=(R,a,b)$ | ||||
| \end_inset | ||||
| 
 | ||||
|  from world coordinates to a camera coordinate frame  | ||||
| \begin_inset Formula $T_{c}^{w}$ | ||||
| \end_inset | ||||
| 
 | ||||
| , specified in world coordinates, can be written as a function  | ||||
| \begin_inset Formula $f:\SEthree\times L\rightarrow L$ | ||||
| \end_inset | ||||
| 
 | ||||
| , as given above, i.e.,  | ||||
| \begin_inset Formula  | ||||
| \begin{eqnarray*} | ||||
| R'(I+\Omega')=(AB)(I+\Omega') & = & (I+\Skew{S\omega})AB\\ | ||||
| I+\Omega' & = & (AB)^{T}(I+\Skew{S\omega})(AB)\\ | ||||
| \Omega' & = & R'^{T}\Skew{S\omega}R'\\ | ||||
| \Omega' & = & \Skew{R'^{T}S\omega}\\ | ||||
| \omega' & = & R'^{T}S\omega | ||||
| \end{eqnarray*} | ||||
| \[ | ||||
| f(T_{c}^{w},l^{w})=\left(\left(R_{c}^{w}\right)^{T}R,a-R_{1}^{T}t^{w},b-R_{2}^{T}t^{w}\right). | ||||
| \] | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| Let us find the Jacobian  | ||||
| \begin_inset Formula $J_{1}$ | ||||
| \end_inset | ||||
| 
 | ||||
|  of  | ||||
| \begin_inset Formula $f$ | ||||
| \end_inset | ||||
| 
 | ||||
|  with respect to the first argument  | ||||
| \begin_inset Formula $T_{c}^{w}$ | ||||
| \end_inset | ||||
| 
 | ||||
| , which should obey | ||||
| \begin_inset Formula  | ||||
| \begin{align*} | ||||
| f(T_{c}^{w}e^{\xihat},l^{w}) & \approx f(T_{c}^{w},l^{w})+J_{1}\xi | ||||
| \end{align*} | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| Note that | ||||
| \begin_inset Formula  | ||||
| \[ | ||||
| T_{c}^{w}e^{\xihat}\approx\left[\begin{array}{cc} | ||||
| R_{c}^{w}\left(I_{3}+\Skew{\omega}\right) & t^{w}+R_{c}^{w}v\\ | ||||
| 0 & 1 | ||||
| \end{array}\right] | ||||
| \] | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| Let's write this out separately for each of  | ||||
| \begin_inset Formula $R,a,b$ | ||||
| \end_inset | ||||
| 
 | ||||
| : | ||||
| \begin_inset Formula  | ||||
| \begin{align*} | ||||
| \left(R_{c}^{w}\left(I_{3}+\Skew{\omega}\right)\right)^{T}R & \approx\left(R_{c}^{w}\right)^{T}R(I+\left[J_{R\omega}\omega\right]_{\times})\\ | ||||
| a-R_{1}^{T}\left(t^{w}+R_{c}^{w}v\right) & \approx a-R_{1}^{T}t^{w}+J_{av}v\\ | ||||
| b-R_{2}^{T}\left(t^{w}+R_{c}^{w}v\right) & \approx b-R_{2}^{T}t^{w}+J_{bv}v | ||||
| \end{align*} | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| Simplifying, we get: | ||||
| \begin_inset Formula  | ||||
| \begin{align*} | ||||
| -\Skew{\omega}R' & \approx R'\left[J_{R\omega}\omega\right]_{\times}\\ | ||||
| -R_{1}^{T}R_{c}^{w} & \approx J_{av}\\ | ||||
| -R_{2}^{T}R_{c}^{w} & \approx J_{bv} | ||||
| \end{align*} | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
| which gives the expressions for  | ||||
| \begin_inset Formula $J_{av}$ | ||||
| \end_inset | ||||
| 
 | ||||
|  and  | ||||
| \begin_inset Formula $J_{bv}$ | ||||
| \end_inset | ||||
| 
 | ||||
| . | ||||
|  The top line can be further simplified: | ||||
| \begin_inset Formula  | ||||
| \begin{align*} | ||||
| -\Skew{\omega}R' & \approx R'\left[J_{R\omega}\omega\right]_{\times}\\ | ||||
| -R'^{T}\Skew{\omega}R' & \approx\left[J_{R\omega}\omega\right]_{\times}\\ | ||||
| -\Skew{R'^{T}\omega} & \approx\left[J_{R\omega}\omega\right]_{\times}\\ | ||||
| -R'^{T} & \approx J_{R\omega} | ||||
| \end{align*} | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -6687,6 +6823,7 @@ Spivak | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Spivak65book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -6795,6 +6932,7 @@ The following is adapted from Appendix A in | |||
| \begin_inset CommandInset citation | ||||
| LatexCommand cite | ||||
| key "Murray94book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  | @ -6924,6 +7062,7 @@ might be | |||
| LatexCommand cite | ||||
| after "page 45" | ||||
| key "Hall00book" | ||||
| literal "true" | ||||
| 
 | ||||
| \end_inset | ||||
| 
 | ||||
|  |  |||
BIN	doc/math.pdf
Binary file not shown.
							|  | @ -0,0 +1,21 @@ | |||
| # Instructions | ||||
| 
 | ||||
| Build all docker images, in order: | ||||
| 
 | ||||
| ```bash | ||||
| (cd ubuntu-boost-tbb && ./build.sh) | ||||
| (cd ubuntu-gtsam && ./build.sh) | ||||
| (cd ubuntu-gtsam-python && ./build.sh) | ||||
| (cd ubuntu-gtsam-python-vnc && ./build.sh) | ||||
| ``` | ||||
| 
 | ||||
| Then launch with:  | ||||
| 
 | ||||
|     docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic | ||||
| 
 | ||||
| Then open a remote VNC X client, for example: | ||||
| 
 | ||||
|     sudo apt-get install tigervnc-viewer | ||||
|     xtigervncviewer :5900 | ||||
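|  | ||||
| To make the VNC server ask for a password, you can set the | ||||
| `VNC_SERVER_PASSWORD` environment variable that the bootstrap script checks, | ||||
| for example (the password value below is only a placeholder): | ||||
|  | ||||
| ```bash | ||||
| docker run -e VNC_SERVER_PASSWORD=changeme -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic | ||||
| ``` | ||||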
| 
 | ||||
| 
 | ||||
|  | @ -1,18 +0,0 @@ | |||
| # Get the base Ubuntu image from Docker Hub | ||||
| FROM ubuntu:bionic | ||||
| 
 | ||||
| # Update apps on the base image | ||||
| RUN apt-get -y update && apt-get install -y | ||||
| 
 | ||||
| # Install C++ | ||||
| RUN apt-get -y install build-essential  | ||||
| 
 | ||||
| # Install boost and cmake | ||||
| RUN apt-get -y install libboost-all-dev cmake | ||||
| 
 | ||||
| # Install TBB | ||||
| RUN apt-get -y install libtbb-dev | ||||
| 
 | ||||
| # Install latest Eigen | ||||
| RUN apt-get install -y libeigen3-dev | ||||
| 
 | ||||
|  | @ -0,0 +1,19 @@ | |||
| # Basic Ubuntu 18.04 image with Boost and TBB installed. To be used for building further downstream packages. | ||||
| 
 | ||||
| # Get the base Ubuntu image from Docker Hub | ||||
| FROM ubuntu:bionic | ||||
| 
 | ||||
| # Disable GUI prompts | ||||
| ENV DEBIAN_FRONTEND noninteractive | ||||
| 
 | ||||
| # Update apps on the base image | ||||
| RUN apt-get -y update && apt-get -y install | ||||
| 
 | ||||
| # Install C++ | ||||
| RUN apt-get -y install build-essential  apt-utils | ||||
| 
 | ||||
| # Install boost and cmake | ||||
| RUN apt-get -y install libboost-all-dev cmake | ||||
| 
 | ||||
| # Install TBB | ||||
| RUN apt-get -y install libtbb-dev | ||||
|  | @ -0,0 +1,3 @@ | |||
| # Build command for Docker image | ||||
| # TODO(dellaert): use docker compose and/or cmake | ||||
| docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic . | ||||
|  | @ -0,0 +1,20 @@ | |||
| # This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction. | ||||
| 
 | ||||
| # Get the base Ubuntu/GTSAM image from Docker Hub | ||||
| FROM dellaert/ubuntu-gtsam-python:bionic | ||||
| 
 | ||||
| # Things needed to get a python GUI | ||||
| ENV DEBIAN_FRONTEND noninteractive | ||||
| RUN apt install -y python-tk | ||||
| RUN python3 -m pip install matplotlib | ||||
| 
 | ||||
| # Install a VNC X-server, Frame buffer, and windows manager | ||||
| RUN apt install -y x11vnc xvfb fluxbox | ||||
| 
 | ||||
| # Finally, install wmctrl needed for bootstrap script | ||||
| RUN apt install -y wmctrl | ||||
| 
 | ||||
| # Copy bootstrap script and make sure it runs | ||||
| COPY bootstrap.sh / | ||||
| 
 | ||||
| CMD '/bootstrap.sh' | ||||
|  | @ -0,0 +1,111 @@ | |||
| #!/bin/bash | ||||
| 
 | ||||
| # Based on: http://www.richud.com/wiki/Ubuntu_Fluxbox_GUI_with_x11vnc_and_Xvfb | ||||
| 
 | ||||
| main() { | ||||
|     log_i "Starting xvfb virtual display..." | ||||
|     launch_xvfb | ||||
|     log_i "Starting window manager..." | ||||
|     launch_window_manager | ||||
|     log_i "Starting VNC server..." | ||||
|     run_vnc_server | ||||
| } | ||||
| 
 | ||||
| launch_xvfb() { | ||||
|     local xvfbLockFilePath="/tmp/.X1-lock" | ||||
|     if [ -f "${xvfbLockFilePath}" ] | ||||
|     then | ||||
|         log_i "Removing xvfb lock file '${xvfbLockFilePath}'..." | ||||
|         if ! rm -v "${xvfbLockFilePath}" | ||||
|         then | ||||
|             log_e "Failed to remove xvfb lock file" | ||||
|             exit 1 | ||||
|         fi | ||||
|     fi | ||||
| 
 | ||||
|     # Set defaults if the user did not specify envs. | ||||
|     export DISPLAY=${XVFB_DISPLAY:-:1} | ||||
|     local screen=${XVFB_SCREEN:-0} | ||||
|     local resolution=${XVFB_RESOLUTION:-1280x960x24} | ||||
|     local timeout=${XVFB_TIMEOUT:-5} | ||||
| 
 | ||||
|     # Start and wait for either Xvfb to be fully up or we hit the timeout. | ||||
|     Xvfb ${DISPLAY} -screen ${screen} ${resolution} & | ||||
|     local loopCount=0 | ||||
|     until xdpyinfo -display ${DISPLAY} > /dev/null 2>&1 | ||||
|     do | ||||
|         loopCount=$((loopCount+1)) | ||||
|         sleep 1 | ||||
|         if [ ${loopCount} -gt ${timeout} ] | ||||
|         then | ||||
|             log_e "xvfb failed to start" | ||||
|             exit 1 | ||||
|         fi | ||||
|     done | ||||
| } | ||||
| 
 | ||||
| launch_window_manager() { | ||||
|     local timeout=${XVFB_TIMEOUT:-5} | ||||
| 
 | ||||
|     # Start and wait for either fluxbox to be fully up or we hit the timeout. | ||||
|     fluxbox & | ||||
|     local loopCount=0 | ||||
|     until wmctrl -m > /dev/null 2>&1 | ||||
|     do | ||||
|         loopCount=$((loopCount+1)) | ||||
|         sleep 1 | ||||
|         if [ ${loopCount} -gt ${timeout} ] | ||||
|         then | ||||
|             log_e "fluxbox failed to start" | ||||
|             exit 1 | ||||
|         fi | ||||
|     done | ||||
| } | ||||
| 
 | ||||
| run_vnc_server() { | ||||
|     local passwordArgument='-nopw' | ||||
| 
 | ||||
|     if [ -n "${VNC_SERVER_PASSWORD}" ] | ||||
|     then | ||||
|         local passwordFilePath="${HOME}/.x11vnc.pass" | ||||
|         if ! x11vnc -storepasswd "${VNC_SERVER_PASSWORD}" "${passwordFilePath}" | ||||
|         then | ||||
|             log_e "Failed to store x11vnc password" | ||||
|             exit 1 | ||||
|         fi | ||||
|         passwordArgument="-rfbauth ${passwordFilePath}" | ||||
|         log_i "The VNC server will ask for a password" | ||||
|     else | ||||
|         log_w "The VNC server will NOT ask for a password" | ||||
|     fi | ||||
| 
 | ||||
|     x11vnc -ncache 10 -ncache_cr -display ${DISPLAY} -forever ${passwordArgument} & | ||||
|     wait $! | ||||
| } | ||||
| 
 | ||||
| log_i() { | ||||
|     log "[INFO] ${@}" | ||||
| } | ||||
| 
 | ||||
| log_w() { | ||||
|     log "[WARN] ${@}" | ||||
| } | ||||
| 
 | ||||
| log_e() { | ||||
|     log "[ERROR] ${@}" | ||||
| } | ||||
| 
 | ||||
| log() { | ||||
|     echo "[$(date '+%Y-%m-%d %H:%M:%S')] ${@}" | ||||
| } | ||||
| 
 | ||||
| control_c() { | ||||
|     echo "" | ||||
|     exit | ||||
| } | ||||
| 
 | ||||
| trap control_c SIGINT SIGTERM SIGHUP | ||||
| 
 | ||||
| main | ||||
| 
 | ||||
| exit | ||||
|  | @ -0,0 +1,4 @@ | |||
| # Build command for Docker image | ||||
| # TODO(dellaert): use docker compose and/or cmake | ||||
| # Needs to be run in docker/ubuntu-gtsam-python-vnc directory | ||||
| docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic . | ||||
|  | @ -0,0 +1,5 @@ | |||
| # After running this script, connect VNC client to 0.0.0.0:5900 | ||||
| docker run -it \ | ||||
|     --workdir="/usr/src/gtsam" \ | ||||
|     -p 5900:5900 \ | ||||
|     dellaert/ubuntu-gtsam-python-vnc:bionic | ||||
|  | @ -0,0 +1,31 @@ | |||
| #  GTSAM Ubuntu image with Python wrapper support. | ||||
| 
 | ||||
| # Get the base Ubuntu/GTSAM image from Docker Hub | ||||
| FROM dellaert/ubuntu-gtsam:bionic | ||||
| 
 | ||||
| # Install pip | ||||
| RUN apt-get install -y python3-pip python3-dev | ||||
| 
 | ||||
| # Install python wrapper requirements | ||||
| RUN python3 -m pip install -U -r /usr/src/gtsam/python/requirements.txt | ||||
| 
 | ||||
| # Run cmake again, now with python toolbox on | ||||
| WORKDIR /usr/src/gtsam/build | ||||
| RUN cmake \ | ||||
|     -DCMAKE_BUILD_TYPE=Release \ | ||||
|     -DGTSAM_WITH_EIGEN_MKL=OFF \ | ||||
|     -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ | ||||
|     -DGTSAM_BUILD_TIMING_ALWAYS=OFF \ | ||||
|     -DGTSAM_BUILD_TESTS=OFF \ | ||||
|     -DGTSAM_BUILD_PYTHON=ON \ | ||||
|     -DGTSAM_PYTHON_VERSION=3\ | ||||
|     .. | ||||
| 
 | ||||
| # Build again, as ubuntu-gtsam image cleaned | ||||
| RUN make -j4 install && make clean | ||||
| 
 | ||||
| # Needed to run python wrapper: | ||||
| RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc | ||||
| 
 | ||||
| # Run bash | ||||
| CMD ["bash"] | ||||
|  | @ -0,0 +1,3 @@ | |||
| # Build command for Docker image | ||||
| # TODO(dellaert): use docker compose and/or cmake | ||||
| docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic . | ||||
|  | @ -0,0 +1,35 @@ | |||
| # Ubuntu image with GTSAM installed. Configured with  Boost and TBB support. | ||||
| 
 | ||||
| # Get the base Ubuntu image from Docker Hub | ||||
| FROM dellaert/ubuntu-boost-tbb:bionic | ||||
| 
 | ||||
| # Install git | ||||
| RUN apt-get update && \ | ||||
|     apt-get install -y git | ||||
| 
 | ||||
| # Install compiler | ||||
| RUN apt-get install -y build-essential | ||||
| 
 | ||||
| # Clone GTSAM (develop branch) | ||||
| WORKDIR /usr/src/ | ||||
| RUN git clone --single-branch --branch develop https://github.com/borglab/gtsam.git | ||||
| 
 | ||||
| # Change to build directory. Will be created automatically. | ||||
| WORKDIR /usr/src/gtsam/build | ||||
| # Run cmake | ||||
| RUN cmake \ | ||||
|     -DCMAKE_BUILD_TYPE=Release \ | ||||
|     -DGTSAM_WITH_EIGEN_MKL=OFF \ | ||||
|     -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ | ||||
|     -DGTSAM_BUILD_TIMING_ALWAYS=OFF \ | ||||
|     -DGTSAM_BUILD_TESTS=OFF \ | ||||
|     .. | ||||
| 
 | ||||
| # Build | ||||
| RUN make -j4 install && make clean | ||||
| 
 | ||||
| # Needed to link with GTSAM | ||||
| RUN echo 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' >> /root/.bashrc | ||||
| 
 | ||||
| # Run bash | ||||
| CMD ["bash"] | ||||
|  | @ -0,0 +1,3 @@ | |||
| # Build command for Docker image | ||||
| # TODO(dellaert): use docker compose and/or cmake | ||||
| docker build --no-cache -t dellaert/ubuntu-gtsam:bionic . | ||||
|  | @ -1,7 +1,4 @@ | |||
| set (excluded_examples | ||||
|     DiscreteBayesNet_FG.cpp | ||||
|     UGM_chain.cpp | ||||
|     UGM_small.cpp | ||||
|     elaboratePoint2KalmanFilter.cpp | ||||
| ) | ||||
| 
 | ||||
|  |  | |||
|  | @ -18,7 +18,8 @@ | |||
| 
 | ||||
| #include <gtsam/inference/Symbol.h> | ||||
| #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h> | ||||
| #include <gtsam/geometry/SimpleCamera.h> | ||||
| #include <gtsam/geometry/PinholeCamera.h> | ||||
| #include <gtsam/geometry/Cal3_S2.h> | ||||
| #include <boost/make_shared.hpp> | ||||
| 
 | ||||
| using namespace gtsam; | ||||
|  | @ -45,9 +46,9 @@ public: | |||
|   } | ||||
| 
 | ||||
|   /// evaluate the error
 | ||||
|   virtual Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H = | ||||
|       boost::none) const { | ||||
|     SimpleCamera camera(pose, *K_); | ||||
|   Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H = | ||||
|       boost::none) const override { | ||||
|     PinholeCamera<Cal3_S2> camera(pose, *K_); | ||||
|     return camera.project(P_, H, boost::none, boost::none) - p_; | ||||
|   } | ||||
| }; | ||||
|  |  | |||
|  | @ -0,0 +1,303 @@ | |||
| /* ----------------------------------------------------------------------------
 | ||||
| 
 | ||||
|  * GTSAM Copyright 2010, Georgia Tech Research Corporation, | ||||
|  * Atlanta, Georgia 30332-0415 | ||||
|  * All Rights Reserved | ||||
|  * Authors: Frank Dellaert, et al. (see THANKS for the full author list) | ||||
| 
 | ||||
|  * See LICENSE for the license information | ||||
| 
 | ||||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file CombinedImuFactorsExample | ||||
|  * @brief Test example for using GTSAM CombinedImuFactor | ||||
|  * navigation code. | ||||
|  * @author Varun Agrawal | ||||
|  */ | ||||
| 
 | ||||
| /**
 | ||||
|  * Example of use of the CombinedImuFactor in | ||||
|  * conjunction with GPS | ||||
|  *  - we read IMU and GPS data from a CSV file, with the following format: | ||||
|  *  A row starting with "i" is the first initial position formatted with | ||||
|  *  N, E, D, qX, qY, qZ, qW, velN, velE, velD | ||||
|  *  A row starting with "0" is an IMU measurement | ||||
|  *  linAccN, linAccE, linAccD, angVelN, angVelE, angVelD | ||||
|  *  A row starting with "1" is a GPS correction formatted with | ||||
|  *  N, E, D, qX, qY, qZ, qW | ||||
|  * Note that for GPS correction, we're only using the position, not the | ||||
|  * rotation. The rotation is provided in the file for ground truth comparison. | ||||
|  * | ||||
|  *  See usage: ./CombinedImuFactorsExample --help | ||||
|  */ | ||||
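| // Illustrative rows in the format described above. These values are made up | ||||
| // for reference only and are not taken from an actual dataset: | ||||
| //   i,0,0,0,0,0,0,1,0,0,0                  (initial state: N,E,D, qX,qY,qZ,qW, velN,velE,velD) | ||||
| //   0,0.01,-0.02,0.03,0.001,0.002,-0.001   (IMU row: linAccN,E,D then angVelN,E,D) | ||||
| //   1,0.05,0.10,0.00,0,0,0,1               (GPS row: N,E,D, qX,qY,qZ,qW) | ||||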
| 
 | ||||
| #include <boost/program_options.hpp> | ||||
| 
 | ||||
| // GTSAM related includes.
 | ||||
| #include <gtsam/inference/Symbol.h> | ||||
| #include <gtsam/navigation/CombinedImuFactor.h> | ||||
| #include <gtsam/navigation/GPSFactor.h> | ||||
| #include <gtsam/navigation/ImuFactor.h> | ||||
| #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h> | ||||
| #include <gtsam/nonlinear/NonlinearFactorGraph.h> | ||||
| #include <gtsam/slam/BetweenFactor.h> | ||||
| #include <gtsam/slam/dataset.h> | ||||
| 
 | ||||
| #include <cstring> | ||||
| #include <fstream> | ||||
| #include <iostream> | ||||
| 
 | ||||
| using namespace gtsam; | ||||
| using namespace std; | ||||
| 
 | ||||
| using symbol_shorthand::B;  // Bias  (ax,ay,az,gx,gy,gz)
 | ||||
| using symbol_shorthand::V;  // Vel   (xdot,ydot,zdot)
 | ||||
| using symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)
 | ||||
| 
 | ||||
| namespace po = boost::program_options; | ||||
| 
 | ||||
| po::variables_map parseOptions(int argc, char* argv[]) { | ||||
|   po::options_description desc; | ||||
|   desc.add_options()("help,h", "produce help message")( | ||||
|       "data_csv_path", po::value<string>()->default_value("imuAndGPSdata.csv"), | ||||
|       "path to the CSV file with the IMU data")( | ||||
|       "output_filename", | ||||
|       po::value<string>()->default_value("imuFactorExampleResults.csv"), | ||||
|       "path to the result file to use")("use_isam", po::bool_switch(), | ||||
|                                         "use ISAM as the optimizer"); | ||||
| 
 | ||||
|   po::variables_map vm; | ||||
|   po::store(po::parse_command_line(argc, argv, desc), vm); | ||||
| 
 | ||||
|   if (vm.count("help")) { | ||||
|     cout << desc << "\n"; | ||||
|     exit(1); | ||||
|   } | ||||
| 
 | ||||
|   return vm; | ||||
| } | ||||
| 
 | ||||
| Vector10 readInitialState(ifstream& file) { | ||||
|   string value; | ||||
|   // Format is (N,E,D,qX,qY,qZ,qW,velN,velE,velD)
 | ||||
|   Vector10 initial_state; | ||||
|   getline(file, value, ',');  // i
 | ||||
|   for (int i = 0; i < 9; i++) { | ||||
|     getline(file, value, ','); | ||||
|     initial_state(i) = stof(value.c_str()); | ||||
|   } | ||||
|   getline(file, value, '\n'); | ||||
|   initial_state(9) = stof(value.c_str()); | ||||
| 
 | ||||
|   return initial_state; | ||||
| } | ||||
| 
 | ||||
| boost::shared_ptr<PreintegratedCombinedMeasurements::Params> imuParams() { | ||||
|   // We use the sensor specs to build the noise model for the IMU factor.
 | ||||
|   double accel_noise_sigma = 0.0003924; | ||||
|   double gyro_noise_sigma = 0.000205689024915; | ||||
|   double accel_bias_rw_sigma = 0.004905; | ||||
|   double gyro_bias_rw_sigma = 0.000001454441043; | ||||
|   Matrix33 measured_acc_cov = I_3x3 * pow(accel_noise_sigma, 2); | ||||
|   Matrix33 measured_omega_cov = I_3x3 * pow(gyro_noise_sigma, 2); | ||||
|   Matrix33 integration_error_cov = | ||||
|       I_3x3 * 1e-8;  // error committed in integrating position from velocities
 | ||||
|   Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2); | ||||
|   Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2); | ||||
|   Matrix66 bias_acc_omega_int = | ||||
|       I_6x6 * 1e-5;  // error in the bias used for preintegration
 | ||||
| 
 | ||||
|   auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0); | ||||
|   // PreintegrationBase params:
 | ||||
|   p->accelerometerCovariance = | ||||
|       measured_acc_cov;  // acc white noise in continuous
 | ||||
|   p->integrationCovariance = | ||||
|       integration_error_cov;  // integration uncertainty continuous
 | ||||
|   // should be using 2nd order integration
 | ||||
|   // PreintegratedRotation params:
 | ||||
|   p->gyroscopeCovariance = | ||||
|       measured_omega_cov;  // gyro white noise in continuous
 | ||||
|   // PreintegrationCombinedMeasurements params:
 | ||||
|   p->biasAccCovariance = bias_acc_cov;      // acc bias in continuous
 | ||||
|   p->biasOmegaCovariance = bias_omega_cov;  // gyro bias in continuous
 | ||||
|   p->biasAccOmegaInt = bias_acc_omega_int; | ||||
| 
 | ||||
|   return p; | ||||
| } | ||||
| 
 | ||||
| int main(int argc, char* argv[]) { | ||||
|   string data_filename, output_filename; | ||||
|   po::variables_map var_map = parseOptions(argc, argv); | ||||
| 
 | ||||
|   data_filename = findExampleDataFile(var_map["data_csv_path"].as<string>()); | ||||
|   output_filename = var_map["output_filename"].as<string>(); | ||||
| 
 | ||||
|   // Set up output file for plotting errors
 | ||||
|   FILE* fp_out = fopen(output_filename.c_str(), "w+"); | ||||
|   fprintf(fp_out, | ||||
|           "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m),gt_qx," | ||||
|           "gt_qy,gt_qz,gt_qw\n"); | ||||
| 
 | ||||
|   // Begin parsing the CSV file.  Input the first line for initialization.
 | ||||
|   // From there, we'll iterate through the file and we'll preintegrate the IMU
 | ||||
|   // or add in the GPS given the input.
 | ||||
|   ifstream file(data_filename.c_str()); | ||||
| 
 | ||||
|   Vector10 initial_state = readInitialState(file); | ||||
|   cout << "initial state:\n" << initial_state.transpose() << "\n\n"; | ||||
| 
 | ||||
|   // Assemble initial quaternion through GTSAM constructor
 | ||||
|   // ::Quaternion(w,x,y,z);
 | ||||
|   Rot3 prior_rotation = Rot3::Quaternion(initial_state(6), initial_state(3), | ||||
|                                          initial_state(4), initial_state(5)); | ||||
|   Point3 prior_point(initial_state.head<3>()); | ||||
|   Pose3 prior_pose(prior_rotation, prior_point); | ||||
|   Vector3 prior_velocity(initial_state.tail<3>()); | ||||
| 
 | ||||
|   imuBias::ConstantBias prior_imu_bias;  // assume zero initial bias
 | ||||
| 
 | ||||
|   int index = 0; | ||||
| 
 | ||||
|   Values initial_values; | ||||
| 
 | ||||
|   // insert pose at initialization
 | ||||
|   initial_values.insert(X(index), prior_pose); | ||||
|   initial_values.insert(V(index), prior_velocity); | ||||
|   initial_values.insert(B(index), prior_imu_bias); | ||||
| 
 | ||||
|   // Assemble prior noise model and add it to the graph.
 | ||||
|   auto pose_noise_model = noiseModel::Diagonal::Sigmas( | ||||
|       (Vector(6) << 0.01, 0.01, 0.01, 0.5, 0.5, 0.5) | ||||
|           .finished());  // rad,rad,rad,m, m, m
 | ||||
|   auto velocity_noise_model = noiseModel::Isotropic::Sigma(3, 0.1);  // m/s
 | ||||
|   auto bias_noise_model = noiseModel::Isotropic::Sigma(6, 1e-3); | ||||
| 
 | ||||
|   // Add all prior factors (pose, velocity, bias) to the graph.
 | ||||
|   NonlinearFactorGraph graph; | ||||
|   graph.addPrior<Pose3>(X(index), prior_pose, pose_noise_model); | ||||
|   graph.addPrior<Vector3>(V(index), prior_velocity, velocity_noise_model); | ||||
|   graph.addPrior<imuBias::ConstantBias>(B(index), prior_imu_bias, | ||||
|                                         bias_noise_model); | ||||
| 
 | ||||
|   auto p = imuParams(); | ||||
| 
 | ||||
|   std::shared_ptr<PreintegrationType> preintegrated = | ||||
|       std::make_shared<PreintegratedCombinedMeasurements>(p, prior_imu_bias); | ||||
| 
 | ||||
|   assert(preintegrated); | ||||
| 
 | ||||
|   // Store previous state for imu integration and latest predicted outcome.
 | ||||
|   NavState prev_state(prior_pose, prior_velocity); | ||||
|   NavState prop_state = prev_state; | ||||
|   imuBias::ConstantBias prev_bias = prior_imu_bias; | ||||
| 
 | ||||
|   // Keep track of total error over the entire run as simple performance metric.
 | ||||
|   double current_position_error = 0.0, current_orientation_error = 0.0; | ||||
| 
 | ||||
|   double output_time = 0.0; | ||||
|   double dt = 0.005;  // The real system has noise, but here, results are nearly
 | ||||
|                       // exactly the same, so keeping this for simplicity.
 | ||||
| 
 | ||||
|   // All priors have been set up, now iterate through the data file.
 | ||||
|   while (file.good()) { | ||||
|     // Parse out first value
 | ||||
|     string value; | ||||
|     getline(file, value, ','); | ||||
|     int type = stoi(value.c_str()); | ||||
| 
 | ||||
|     if (type == 0) {  // IMU measurement
 | ||||
|       Vector6 imu; | ||||
|       for (int i = 0; i < 5; ++i) { | ||||
|         getline(file, value, ','); | ||||
|         imu(i) = stof(value.c_str()); | ||||
|       } | ||||
|       getline(file, value, '\n'); | ||||
|       imu(5) = stof(value.c_str()); | ||||
| 
 | ||||
|       // Adding the IMU preintegration.
 | ||||
|       preintegrated->integrateMeasurement(imu.head<3>(), imu.tail<3>(), dt); | ||||
| 
 | ||||
|     } else if (type == 1) {  // GPS measurement
 | ||||
|       Vector7 gps; | ||||
|       for (int i = 0; i < 6; ++i) { | ||||
|         getline(file, value, ','); | ||||
|         gps(i) = stof(value.c_str()); | ||||
|       } | ||||
|       getline(file, value, '\n'); | ||||
|       gps(6) = stof(value.c_str()); | ||||
| 
 | ||||
|       index++; | ||||
| 
 | ||||
|       // Adding IMU factor and GPS factor and optimizing.
 | ||||
|       auto preint_imu_combined = | ||||
|           dynamic_cast<const PreintegratedCombinedMeasurements&>( | ||||
|               *preintegrated); | ||||
|       CombinedImuFactor imu_factor(X(index - 1), V(index - 1), X(index), | ||||
|                                    V(index), B(index - 1), B(index), | ||||
|                                    preint_imu_combined); | ||||
|       graph.add(imu_factor); | ||||
| 
 | ||||
|       auto correction_noise = noiseModel::Isotropic::Sigma(3, 1.0); | ||||
|       GPSFactor gps_factor(X(index), | ||||
|                            Point3(gps(0),   // N,
 | ||||
|                                   gps(1),   // E,
 | ||||
|                                   gps(2)),  // D,
 | ||||
|                            correction_noise); | ||||
|       graph.add(gps_factor); | ||||
| 
 | ||||
|       // Now optimize and compare results.
 | ||||
|       prop_state = preintegrated->predict(prev_state, prev_bias); | ||||
|       initial_values.insert(X(index), prop_state.pose()); | ||||
|       initial_values.insert(V(index), prop_state.v()); | ||||
|       initial_values.insert(B(index), prev_bias); | ||||
| 
 | ||||
|       LevenbergMarquardtParams params; | ||||
|       params.setVerbosityLM("SUMMARY"); | ||||
|       LevenbergMarquardtOptimizer optimizer(graph, initial_values, params); | ||||
|       Values result = optimizer.optimize(); | ||||
| 
 | ||||
|       // Overwrite the beginning of the preintegration for the next step.
 | ||||
|       prev_state = | ||||
|           NavState(result.at<Pose3>(X(index)), result.at<Vector3>(V(index))); | ||||
|       prev_bias = result.at<imuBias::ConstantBias>(B(index)); | ||||
| 
 | ||||
|       // Reset the preintegration object.
 | ||||
|       preintegrated->resetIntegrationAndSetBias(prev_bias); | ||||
| 
 | ||||
|       // Print out the position and orientation error for comparison.
 | ||||
|       Vector3 result_position = prev_state.pose().translation(); | ||||
|       Vector3 position_error = result_position - gps.head<3>(); | ||||
|       current_position_error = position_error.norm(); | ||||
| 
 | ||||
|       Quaternion result_quat = prev_state.pose().rotation().toQuaternion(); | ||||
|       Quaternion gps_quat(gps(6), gps(3), gps(4), gps(5)); | ||||
|       Quaternion quat_error = result_quat * gps_quat.inverse(); | ||||
|       quat_error.normalize(); | ||||
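|       // Small-angle approximation: the vector part of a unit quaternion is | ||||
|       // roughly half the rotation vector, so scaling by 2 below yields an | ||||
|       // approximate angular error in radians. | ||||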
|       Vector3 euler_angle_error(quat_error.x() * 2, quat_error.y() * 2, | ||||
|                                 quat_error.z() * 2); | ||||
|       current_orientation_error = euler_angle_error.norm(); | ||||
| 
 | ||||
|       // display statistics
 | ||||
|       cout << "Position error:" << current_position_error << "\t " | ||||
|            << "Angular error:" << current_orientation_error << "\n" | ||||
|            << endl; | ||||
| 
 | ||||
|       fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", | ||||
|               output_time, result_position(0), result_position(1), | ||||
|               result_position(2), result_quat.x(), result_quat.y(), | ||||
|               result_quat.z(), result_quat.w(), gps(0), gps(1), gps(2), | ||||
|               gps_quat.x(), gps_quat.y(), gps_quat.z(), gps_quat.w()); | ||||
| 
 | ||||
|       output_time += 1.0; | ||||
| 
 | ||||
|     } else { | ||||
|       cerr << "ERROR parsing file\n"; | ||||
|       return 1; | ||||
|     } | ||||
|   } | ||||
|   fclose(fp_out); | ||||
|   cout << "Complete, results written to " << output_filename << "\n\n"; | ||||
| 
 | ||||
|   return 0; | ||||
| } | ||||
|  | @ -31,19 +31,19 @@ void createExampleBALFile(const string& filename, const vector<Point3>& P, | |||
|         Cal3Bundler()) { | ||||
| 
 | ||||
|   // Class that will gather all data
 | ||||
|   SfM_data data; | ||||
|   SfmData data; | ||||
| 
 | ||||
|   // Create two cameras
 | ||||
|   Rot3 aRb = Rot3::Yaw(M_PI_2); | ||||
|   Point3 aTb(0.1, 0, 0); | ||||
|   Pose3 identity, aPb(aRb, aTb); | ||||
|   data.cameras.push_back(SfM_Camera(pose1, K)); | ||||
|   data.cameras.push_back(SfM_Camera(pose2, K)); | ||||
|   data.cameras.push_back(SfmCamera(pose1, K)); | ||||
|   data.cameras.push_back(SfmCamera(pose2, K)); | ||||
| 
 | ||||
|   for(const Point3& p: P) { | ||||
| 
 | ||||
|     // Create the track
 | ||||
|     SfM_Track track; | ||||
|     SfmTrack track; | ||||
|     track.p = p; | ||||
|     track.r = 1; | ||||
|     track.g = 1; | ||||
|  |  | |||
|  | @ -0,0 +1,6 @@ | |||
| VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063 | ||||
| VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862 | ||||
| VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738 | ||||
| EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 | ||||
| EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 | ||||
| EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 | ||||
|  | @ -0,0 +1,16 @@ | |||
| VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 | ||||
| VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 | ||||
| VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 | ||||
| VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 | ||||
| VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162 | ||||
| VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 | ||||
| VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 | ||||
| VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 | ||||
| VERTEX_TRACKXYZ 0 10 10 10 | ||||
| VERTEX_TRACKXYZ 1 -10 10 10 | ||||
| VERTEX_TRACKXYZ 2 -10 -10 10 | ||||
| VERTEX_TRACKXYZ 3 10 -10 10 | ||||
| VERTEX_TRACKXYZ 4 10 10 -10 | ||||
| VERTEX_TRACKXYZ 5 -10 10 -10 | ||||
| VERTEX_TRACKXYZ 6 -10 -10 -10 | ||||
| VERTEX_TRACKXYZ 7 10 -10 -10 | ||||
|  | @ -0,0 +1,9 @@ | |||
| VERTEX_SE3:QUAT 0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1.000000 | ||||
| VERTEX_SE3:QUAT 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230 | ||||
| VERTEX_SE3:QUAT 2 1.993500 0.023275 0.003793 -0.351729 -0.597838 0.584174 0.421446 | ||||
| VERTEX_SE3:QUAT 3 2.004291 1.024305 0.018047 0.331798 -0.200659 0.919323 0.067024 | ||||
| VERTEX_SE3:QUAT 4 0.999908 1.055073 0.020212 -0.035697 -0.462490 0.445933 0.765488 | ||||
| EDGE_SE3:QUAT 0 1   1.001367 0.015390 0.004948   0.190253 0.283162 -0.392318 0.854230   10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000   10000.000000 0.000000   10000.000000 | ||||
| EDGE_SE3:QUAT 1 2   0.523923 0.776654 0.326659   0.311512 0.656877 -0.678505 0.105373   10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000   10000.000000 0.000000   10000.000000 | ||||
| EDGE_SE3:QUAT 2 3   0.910927 0.055169 -0.411761   0.595795 -0.561677 0.079353 0.568551   10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000   10000.000000 0.000000   10000.000000 | ||||
| EDGE_SE3:QUAT 3 4   0.775288 0.228798 -0.596923   -0.592077 0.303380 -0.513226 0.542221   10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000 0.000000   10000.000000 0.000000 0.000000   10000.000000 0.000000   10000.000000 | ||||
|  | @ -0,0 +1,11 @@ | |||
| VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1 | ||||
| VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1 | ||||
| VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963 | ||||
| VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963 | ||||
| VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963 | ||||
| EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
|  | @ -0,0 +1,83 @@ | |||
| /* ----------------------------------------------------------------------------
 | ||||
| 
 | ||||
|  * GTSAM Copyright 2010, Georgia Tech Research Corporation, | ||||
|  * Atlanta, Georgia 30332-0415 | ||||
|  * All Rights Reserved | ||||
|  * Authors: Frank Dellaert, et al. (see THANKS for the full author list) | ||||
| 
 | ||||
|  * See LICENSE for the license information | ||||
| 
 | ||||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file  DiscreteBayesNetExample.cpp | ||||
|  * @brief   Discrete Bayes Net example with famous Asia Bayes Network | ||||
|  * @author  Frank Dellaert | ||||
|  * @date  JULY 10, 2020 | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/discrete/DiscreteFactorGraph.h> | ||||
| #include <gtsam/discrete/DiscreteMarginals.h> | ||||
| #include <gtsam/inference/BayesNet.h> | ||||
| 
 | ||||
| #include <iomanip> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
| int main(int argc, char **argv) { | ||||
|   DiscreteBayesNet asia; | ||||
|   DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2), | ||||
|       Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2); | ||||
|   asia.add(Asia % "99/1"); | ||||
|   asia.add(Smoking % "50/50"); | ||||
| 
 | ||||
|   asia.add(Tuberculosis | Asia = "99/1 95/5"); | ||||
|   asia.add(LungCancer | Smoking = "99/1 90/10"); | ||||
|   asia.add(Bronchitis | Smoking = "70/30 40/60"); | ||||
| 
 | ||||
|   asia.add((Either | Tuberculosis, LungCancer) = "F T T T"); | ||||
| 
 | ||||
|   asia.add(XRay | Either = "95/5 2/98"); | ||||
|   asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9"); | ||||
| 
 | ||||
|   // print
 | ||||
|   vector<string> pretty = {"Asia",    "Dyspnea", "XRay",       "Tuberculosis", | ||||
|                            "Smoking", "Either",  "LungCancer", "Bronchitis"}; | ||||
|   auto formatter = [pretty](Key key) { return pretty[key]; }; | ||||
|   asia.print("Asia", formatter); | ||||
| 
 | ||||
|   // Convert to factor graph
 | ||||
|   DiscreteFactorGraph fg(asia); | ||||
| 
 | ||||
|   // Create solver and eliminate
 | ||||
|   Ordering ordering; | ||||
|   ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7); | ||||
|   DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering); | ||||
| 
 | ||||
|   // solve
 | ||||
|   DiscreteFactor::sharedValues mpe = chordal->optimize(); | ||||
|   GTSAM_PRINT(*mpe); | ||||
| 
 | ||||
|   // We can also build a Bayes tree (directed junction tree).
 | ||||
|   // The elimination order above will do fine:
 | ||||
|   auto bayesTree = fg.eliminateMultifrontal(ordering); | ||||
|   bayesTree->print("bayesTree", formatter); | ||||
| 
 | ||||
|   // add evidence, we were in Asia and we have dyspnea
 | ||||
|   fg.add(Asia, "0 1"); | ||||
|   fg.add(Dyspnea, "0 1"); | ||||
| 
 | ||||
|   // solve again, now with evidence
 | ||||
|   DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering); | ||||
|   DiscreteFactor::sharedValues mpe2 = chordal2->optimize(); | ||||
|   GTSAM_PRINT(*mpe2); | ||||
| 
 | ||||
|   // We can also sample from it
 | ||||
|   cout << "\n10 samples:" << endl; | ||||
|   for (size_t i = 0; i < 10; i++) { | ||||
|     DiscreteFactor::sharedValues sample = chordal2->sample(); | ||||
|     GTSAM_PRINT(*sample); | ||||
|   } | ||||
|   return 0; | ||||
| } | ||||
|  | @ -15,105 +15,106 @@ | |||
|  * @author  Abhijit | ||||
|  * @date  Jun 4, 2012 | ||||
|  * | ||||
|  * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529] | ||||
|  * You may be familiar with other graphical model packages like BNT (available | ||||
|  * at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an
 | ||||
|  * example. The following demo is same as that in the above link, except that | ||||
|  * everything is using GTSAM. | ||||
|  * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, | ||||
|  * p529] You may be familiar with other graphical model packages like BNT | ||||
|  * (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this
 | ||||
|  * is used as an example. The following demo is same as that in the above link, | ||||
|  * except that everything is using GTSAM. | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/discrete/DiscreteFactorGraph.h> | ||||
| #include <gtsam/discrete/DiscreteSequentialSolver.h> | ||||
| #include <gtsam/discrete/DiscreteMarginals.h> | ||||
| 
 | ||||
| #include <iomanip> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
| int main(int argc, char **argv) { | ||||
|   // Define keys and a print function
 | ||||
|   Key C(1), S(2), R(3), W(4); | ||||
|   auto print = [=](DiscreteFactor::sharedValues values) { | ||||
|     cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C]) | ||||
|          << "  Sprinkler = " << static_cast<bool>((*values)[S]) | ||||
|          << "  Rain = " << boolalpha << static_cast<bool>((*values)[R]) | ||||
|          << "  WetGrass = " << static_cast<bool>((*values)[W]) << endl; | ||||
|   }; | ||||
| 
 | ||||
|   // We assume binary state variables
 | ||||
|   // we have 0 == "False" and 1 == "True"
 | ||||
|   const size_t nrStates = 2; | ||||
| 
 | ||||
|   // define variables
 | ||||
|   DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates), | ||||
|       WetGrass(4, nrStates); | ||||
|   DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates), | ||||
|       WetGrass(W, nrStates); | ||||
| 
 | ||||
|   // create Factor Graph of the bayes net
 | ||||
|   DiscreteFactorGraph graph; | ||||
| 
 | ||||
|   // add factors
 | ||||
|   graph.add(Cloudy, "0.5 0.5"); //P(Cloudy)
 | ||||
|   graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy)
 | ||||
|   graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy)
 | ||||
|   graph.add(Cloudy, "0.5 0.5");                      // P(Cloudy)
 | ||||
|   graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1");  // P(Sprinkler | Cloudy)
 | ||||
|   graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8");       // P(Rain | Cloudy)
 | ||||
|   graph.add(Sprinkler & Rain & WetGrass, | ||||
|       "1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain)
 | ||||
|             "1 0 0.1 0.9 0.1 0.9 0.001 0.99");  // P(WetGrass | Sprinkler, Rain)
 | ||||
| 
 | ||||
|   // Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional
 | ||||
|   // factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp)
 | ||||
|   // Alternatively we can also create a DiscreteBayesNet, add
 | ||||
|   // DiscreteConditional factors and create a FactorGraph from it. (See
 | ||||
|   // testDiscreteBayesNet.cpp)
 | ||||
| 
 | ||||
|   // Since this is a relatively small distribution, we can as well print
 | ||||
|   // the whole distribution..
 | ||||
|   cout << "Distribution of Example: " << endl; | ||||
|   cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10) | ||||
|       << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)" | ||||
|       << endl; | ||||
|        << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)" | ||||
|        << endl; | ||||
|   for (size_t a = 0; a < nrStates; a++) | ||||
|     for (size_t m = 0; m < nrStates; m++) | ||||
|       for (size_t h = 0; h < nrStates; h++) | ||||
|         for (size_t c = 0; c < nrStates; c++) { | ||||
|           DiscreteFactor::Values values; | ||||
|           values[Cloudy.first] = c; | ||||
|           values[Sprinkler.first] = h; | ||||
|           values[Rain.first] = m; | ||||
|           values[WetGrass.first] = a; | ||||
|           values[C] = c; | ||||
|           values[S] = h; | ||||
|           values[R] = m; | ||||
|           values[W] = a; | ||||
|           double prodPot = graph(values); | ||||
|           cout << boolalpha << setw(8) << (bool) c << setw(14) | ||||
|               << (bool) h << setw(12) << (bool) m << setw(13) | ||||
|               << (bool) a << setw(16) << prodPot << endl; | ||||
|           cout << setw(8) << static_cast<bool>(c) << setw(14) | ||||
|                << static_cast<bool>(h) << setw(12) << static_cast<bool>(m) | ||||
|                << setw(13) << static_cast<bool>(a) << setw(16) << prodPot | ||||
|                << endl; | ||||
|         } | ||||

  // "Most Probable Explanation", i.e., configuration with largest value
  DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
  cout << "\nMost Probable Explanation (MPE):" << endl;
  print(mpe);

  // "Inference" We show an inference query like: probability that the Sprinkler
  // was on, given that the grass is wet, i.e. P( S | W=1 ) = ?
  cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet"
       << endl;

  // add evidence that it is not Cloudy
  graph.add(Cloudy, "1 0");

  // solve again, now with evidence
  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();

  cout << "\nMPE given C=0:" << endl;
  print(mpe_with_evidence);

  // Note: P(S | W=1) could also be computed as P(S,W=1)/P(W=1) from the joint;
  // a brute-force sketch of that computation is given after this example.

  // we can also calculate arbitrary marginals:
  DiscreteMarginals marginals(graph);
  cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1]
       << endl;
  cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl;
  cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
       << endl;

  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t i = 0; i < 10; i++) {
    DiscreteFactor::sharedValues sample = chordal->sample();
    print(sample);
  }
  return 0;
}
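
For reference, the conditional queried above, P(S=1 | W=1), can also be cross-checked by brute force using only graph(values), the same call the example uses to print the full distribution. This is a minimal sketch, assuming the keys C, S, R, W and the factor graph are built exactly as in the example (the helper name bruteForceConditional is illustrative, not part of the example), and it should be called before the evidence factor on Cloudy is added, otherwise the result is additionally conditioned on C=0:

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/inference/Key.h>

// Brute-force P(S=1 | W=1) = P(S=1, W=1) / P(W=1), summing the unnormalized
// product of all factors over the hidden variables Cloudy and Rain.
double bruteForceConditional(const gtsam::DiscreteFactorGraph& graph,
                             gtsam::Key C, gtsam::Key S, gtsam::Key R,
                             gtsam::Key W) {
  double joint = 0.0, evidence = 0.0;
  for (size_t c = 0; c < 2; c++)
    for (size_t s = 0; s < 2; s++)
      for (size_t r = 0; r < 2; r++) {
        gtsam::DiscreteFactor::Values values;
        values[C] = c;
        values[S] = s;
        values[R] = r;
        values[W] = 1;                   // condition on WetGrass = true
        const double p = graph(values);  // unnormalized product of factors
        evidence += p;                   // accumulates P(W=1) up to a constant
        if (s == 1) joint += p;          // accumulates P(S=1,W=1) up to the same constant
      }
  return joint / evidence;  // the normalization constant cancels in the ratio
}

This enumerates all assignments of the remaining variables, which is fine for a four-variable toy network; DiscreteMarginals is the scalable route used in the example itself.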
@@ -0,0 +1,130 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file    FisheyeExample.cpp
 * @brief   A visualSLAM example for the structure-from-motion problem on a
 * simulated dataset. This version uses a fisheye camera model and a
 * Gauss-Newton solver to solve the graph in one batch.
 * @author  ghaggin
 * @date    Apr 9, 2020
 */

/**
 * A structure-from-motion example with landmarks
 *  - The landmarks form a 10 meter cube
 *  - The robot rotates around the landmarks, always facing towards the cube
 */

// For loading the data
#include "SFMdata.h"

// Camera observations of landmarks will be stored as Point2 (x, y).
#include <gtsam/geometry/Point2.h>

// Each variable in the system (poses and landmarks) must be identified with a
// unique key. We can either use simple integer keys (1, 2, 3, ...) or symbols
// (X1, X2, L1). Here we will use Symbols
#include <gtsam/inference/Symbol.h>

// Use GaussNewtonOptimizer to solve graph
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

// In GTSAM, measurement functions are represented as 'factors'. Several common
// factors have been provided with the library for solving robotics/SLAM/Bundle
// Adjustment problems. Here we will use Projection factors to model the
// camera's landmark observations. Also, we will initialize the robot at some
// location using a Prior factor.
#include <gtsam/geometry/Cal3Fisheye.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/ProjectionFactor.h>

#include <fstream>
#include <vector>

using namespace std;
using namespace gtsam;

using symbol_shorthand::L;  // for landmarks
using symbol_shorthand::X;  // for poses

/* ************************************************************************* */
int main(int argc, char *argv[]) {
  // Define the camera calibration parameters
  auto K = boost::make_shared<Cal3Fisheye>(
      278.66, 278.48, 0.0, 319.75, 241.96, -0.013721808247486035,
      0.020727425669427896, -0.012786476702685545, 0.0025242267320687625);
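  // The arguments are assumed to follow Cal3Fisheye's constructor order:
  // focal lengths fx, fy, skew, principal point (u0, v0), and four
  // equidistant-model distortion coefficients k1..k4.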

  // Define the camera observation noise model, 1 pixel stddev
  auto measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0);

  // Create the set of ground-truth landmarks
  const vector<Point3> points = createPoints();

  // Create the set of ground-truth poses
  const vector<Pose3> poses = createPoses();

  // Create a Factor Graph and Values to hold the new data
  NonlinearFactorGraph graph;
  Values initialEstimate;

  // Add a prior on pose x0, 0.1 rad on roll,pitch,yaw, and 30cm std on x,y,z
  auto posePrior = noiseModel::Diagonal::Sigmas(
      (Vector(6) << Vector3::Constant(0.1), Vector3::Constant(0.3)).finished());
  graph.emplace_shared<PriorFactor<Pose3>>(X(0), poses[0], posePrior);
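  // Note: the six sigmas follow GTSAM's Pose3 tangent-space ordering, rotation
  // (roll, pitch, yaw, in radians) first, then translation (x, y, z, in meters).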

  // Add a prior on landmark l0
  auto pointPrior = noiseModel::Isotropic::Sigma(3, 0.1);
  graph.emplace_shared<PriorFactor<Point3>>(L(0), points[0], pointPrior);

  // Add initial guesses to all observed landmarks
  // Intentionally initialize the variables off from the ground truth
  static const Point3 kDeltaPoint(-0.25, 0.20, 0.15);
  for (size_t j = 0; j < points.size(); ++j)
    initialEstimate.insert<Point3>(L(j), points[j] + kDeltaPoint);

  // Loop over the poses, adding the observations to the graph
  for (size_t i = 0; i < poses.size(); ++i) {
    // Add factors for each landmark observation
    for (size_t j = 0; j < points.size(); ++j) {
      PinholeCamera<Cal3Fisheye> camera(poses[i], *K);
      Point2 measurement = camera.project(points[j]);
      graph.emplace_shared<GenericProjectionFactor<Pose3, Point3, Cal3Fisheye>>(
          measurement, measurementNoise, X(i), L(j), K);
    }
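    // Each projection factor above ties pose X(i) and landmark L(j) together
    // through the shared fisheye calibration K, penalizing the pixel
    // reprojection error under the measurement noise model.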

    // Add an initial guess for the current pose
    // Intentionally initialize the variables off from the ground truth
    static const Pose3 kDeltaPose(Rot3::Rodrigues(-0.1, 0.2, 0.25),
                                  Point3(0.05, -0.10, 0.20));
    initialEstimate.insert(X(i), poses[i] * kDeltaPose);
  }

  GaussNewtonParams params;
  params.setVerbosity("TERMINATION");
  params.maxIterations = 10000;

  std::cout << "Optimizing the factor graph" << std::endl;
  GaussNewtonOptimizer optimizer(graph, initialEstimate, params);
  Values result = optimizer.optimize();
  std::cout << "Optimization complete" << std::endl;

  std::cout << "initial error=" << graph.error(initialEstimate) << std::endl;
  std::cout << "final error=" << graph.error(result) << std::endl;

  std::ofstream os("examples/vio_batch.dot");
  graph.saveGraph(os, result);

  return 0;
}
/* ************************************************************************* */
@@ -0,0 +1,94 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file    DiscreteBayesNetExample.cpp
 * @brief   Hidden Markov Model example, discrete.
 * @author  Frank Dellaert
 * @date    July 12, 2020
 */

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <gtsam/inference/BayesNet.h>

#include <iomanip>
#include <sstream>

using namespace std;
using namespace gtsam;

int main(int argc, char **argv) {
  const int nrNodes = 4;
  const size_t nrStates = 3;

  // Define variables as well as ordering
  Ordering ordering;
  vector<DiscreteKey> keys;
  for (int k = 0; k < nrNodes; k++) {
    DiscreteKey key_i(k, nrStates);
    keys.push_back(key_i);
    ordering.emplace_back(k);
  }

  // Create HMM as a DiscreteBayesNet
  DiscreteBayesNet hmm;

  // Define backbone
  const string transition = "8/1/1 1/8/1 1/1/8";
  for (int k = 1; k < nrNodes; k++) {
    hmm.add(keys[k] | keys[k - 1] = transition);
  }
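  // The transition string above lists one row per previous state, with
  // '/'-separated relative weights that GTSAM normalizes; e.g. "8/1/1" gives
  // probability 0.8 of staying in the same state (assumed reading of the
  // Signature syntax).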

  // Add some measurements, not needed for all time steps!
  hmm.add(keys[0] % "7/2/1");
  hmm.add(keys[1] % "1/9/0");
  hmm.add(keys.back() % "5/4/1");
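  // The '%' syntax above adds a parent-less conditional (a prior/measurement)
  // on a single node, again given as relative weights, e.g. "7/2/1" becomes
  // probabilities 0.7, 0.2, 0.1.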

  // print
  hmm.print("HMM");

  // Convert to factor graph
  DiscreteFactorGraph factorGraph(hmm);

  // Create solver and eliminate
  // This will create a DAG ordered with arrow of time reversed
  DiscreteBayesNet::shared_ptr chordal =
      factorGraph.eliminateSequential(ordering);
  chordal->print("Eliminated");

  // solve
  DiscreteFactor::sharedValues mpe = chordal->optimize();
  GTSAM_PRINT(*mpe);

  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t k = 0; k < 10; k++) {
    DiscreteFactor::sharedValues sample = chordal->sample();
    GTSAM_PRINT(*sample);
  }

  // Or compute the marginals. This re-eliminates the FG into a Bayes tree
  cout << "\nComputing Node Marginals .." << endl;
  DiscreteMarginals marginals(factorGraph);
  for (int k = 0; k < nrNodes; k++) {
    Vector margProbs = marginals.marginalProbabilities(keys[k]);
    stringstream ss;
    ss << "marginal " << k;
    print(margProbs, ss.str());
  }

  // TODO(frank): put in the glue to have DiscreteMarginals produce *arbitrary*
  // joints efficiently, by the Bayes tree shortcut magic. All the code is there
  // but it's not yet connected.

  return 0;
}