diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh new file mode 100644 index 000000000..6948cc385 --- /dev/null +++ b/.github/scripts/python.sh @@ -0,0 +1,127 @@ +#!/bin/bash + +########################################################## +# Build and test the GTSAM Python wrapper. +########################################################## + +set -x -e + +# install TBB with _debug.so files +function install_tbb() +{ + TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download + TBB_VERSION=4.4.5 + TBB_DIR=tbb44_20160526oss + TBB_SAVEPATH="/tmp/tbb.tgz" + + if [ "$(uname)" == "Linux" ]; then + OS_SHORT="lin" + TBB_LIB_DIR="intel64/gcc4.4" + SUDO="sudo" + + elif [ "$(uname)" == "Darwin" ]; then + OS_SHORT="osx" + TBB_LIB_DIR="" + SUDO="" + + fi + + wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH + tar -C /tmp -xf $TBB_SAVEPATH + + TBBROOT=/tmp/$TBB_DIR + # Copy the needed files to the correct places. + # This works correctly for CI builds, instead of setting path variables. + # This is what Homebrew does to install TBB on Macs + $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ + $SUDO cp -R $TBBROOT/include/ /usr/local/include/ + +} + +if [ -z ${PYTHON_VERSION+x} ]; then + echo "Please provide the Python version to build against!" + exit 127 +fi + +if [ -z ${WRAPPER+x} ]; then + echo "Please provide the wrapper to build!" 
+  exit 126
+fi
+
+PYTHON="python${PYTHON_VERSION}"
+
+if [[ $(uname) == "Darwin" ]]; then
+  brew install wget
+else
+  # Install a system package required by our library
+  sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
+fi
+
+PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
+
+[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
+
+case $WRAPPER in
+"cython")
+  BUILD_CYTHON="ON"
+  BUILD_PYBIND="OFF"
+  TYPEDEF_POINTS_TO_VECTORS="OFF"
+
+  sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/cython/requirements.txt
+  ;;
+"pybind")
+  BUILD_CYTHON="OFF"
+  BUILD_PYBIND="ON"
+  TYPEDEF_POINTS_TO_VECTORS="ON"
+
+  sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
+  ;;
+*)
+  exit 126
+  ;;
+esac
+
+mkdir $GITHUB_WORKSPACE/build
+cd $GITHUB_WORKSPACE/build
+
+cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \
+  -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
+  -DGTSAM_USE_QUATERNIONS=OFF \
+  -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
+  -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+  -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
+  -DGTSAM_INSTALL_CYTHON_TOOLBOX=${BUILD_CYTHON} \
+  -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
+  -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \
+  -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
+  -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
+  -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
+  -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
+
+start=$(date +%s); make -j$(nproc) install &
+
+while ps -p $! > /dev/null
+do
+  sleep 60
+  now=$(date +%s)
+  printf "%d seconds have elapsed\n" $(( (now - start) ))
+done
+
+case $WRAPPER in
+"cython")
+  cd $GITHUB_WORKSPACE/build/cython
+  $PYTHON setup.py install --user --prefix=
+  cd $GITHUB_WORKSPACE/build/cython/gtsam/tests
+  $PYTHON -m unittest discover
+  ;;
+"pybind")
+  cd $GITHUB_WORKSPACE/build/python
+  $PYTHON setup.py install --user --prefix=
+  cd $GITHUB_WORKSPACE/python/gtsam/tests
+  $PYTHON -m unittest discover
+  ;;
+*)
+  echo "THIS SHOULD NEVER HAPPEN!"
+ exit 125 + ;; +esac \ No newline at end of file diff --git a/.travis.sh b/.github/scripts/unix.sh old mode 100755 new mode 100644 similarity index 72% rename from .travis.sh rename to .github/scripts/unix.sh index 7777e9919..f85e67dc1 --- a/.travis.sh +++ b/.github/scripts/unix.sh @@ -1,20 +1,25 @@ #!/bin/bash +########################################################## +# Build and test GTSAM for *nix based systems. +# Specifically Linux and macOS. +########################################################## + # install TBB with _debug.so files function install_tbb() { TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download - TBB_VERSION=4.4.2 - TBB_DIR=tbb44_20151115oss + TBB_VERSION=4.4.5 + TBB_DIR=tbb44_20160526oss TBB_SAVEPATH="/tmp/tbb.tgz" - if [ "$TRAVIS_OS_NAME" == "linux" ]; then + if [ "$(uname)" == "Linux" ]; then OS_SHORT="lin" TBB_LIB_DIR="intel64/gcc4.4" SUDO="sudo" - elif [ "$TRAVIS_OS_NAME" == "osx" ]; then - OS_SHORT="lin" + elif [ "$(uname)" == "Darwin" ]; then + OS_SHORT="osx" TBB_LIB_DIR="" SUDO="" @@ -25,7 +30,7 @@ function install_tbb() TBBROOT=/tmp/$TBB_DIR # Copy the needed files to the correct places. - # This works correctly for travis builds, instead of setting path variables. + # This works correctly for CI builds, instead of setting path variables. # This is what Homebrew does to install TBB on Macs $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ $SUDO cp -R $TBBROOT/include/ /usr/local/include/ @@ -38,15 +43,14 @@ function configure() set -e # Make sure any error makes the script to return an error code set -x # echo - SOURCE_DIR=`pwd` - BUILD_DIR=build + SOURCE_DIR=$GITHUB_WORKSPACE + BUILD_DIR=$GITHUB_WORKSPACE/build #env - git clean -fd || true rm -fr $BUILD_DIR || true mkdir $BUILD_DIR && cd $BUILD_DIR - install_tbb + [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb if [ ! 
-z "$GCC_VERSION" ]; then export CC=gcc-$GCC_VERSION @@ -59,11 +63,13 @@ function configure() -DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \ -DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \ -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ - -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \ + -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=OFF + -DBOOST_ROOT=$BOOST_ROOT \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_ARCHITECTURE=-x64 } @@ -71,7 +77,7 @@ function configure() function finish () { # Print ccache stats - ccache -s + [ -x "$(command -v ccache)" ] && ccache -s cd $SOURCE_DIR } @@ -111,4 +117,4 @@ case $1 in -t) test ;; -esac +esac \ No newline at end of file diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml new file mode 100644 index 000000000..afe328c3b --- /dev/null +++ b/.github/workflows/build-linux.yml @@ -0,0 +1,78 @@ +name: Linux CI + +on: [push, pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy + + echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)" + echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)" + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + - name: Check Boost version + if: runner.os == 'Linux' + run: | + echo "BOOST_ROOT = $BOOST_ROOT" + - name: Build and Test (Linux) + if: runner.os == 'Linux' + run: | + bash .github/scripts/unix.sh -t \ No newline at end of file diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml new file mode 100644 index 000000000..363cd690f --- /dev/null +++ b/.github/workflows/build-macos.yml @@ -0,0 +1,51 @@ +name: macOS CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ 
matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + macOS-10.15-xcode-11.3.1, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + - name: Build and Test (macOS) + if: runner.os == 'macOS' + run: | + bash .github/scripts/unix.sh -t \ No newline at end of file diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml new file mode 100644 index 000000000..dc03ec6c9 --- /dev/null +++ b/.github/workflows/build-python.yml @@ -0,0 +1,108 @@ +name: Python CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + PYTHON_VERSION: ${{ matrix.python_version }} + WRAPPER: ${{ matrix.wrapper }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. 
+ # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + ubuntu-18.04-gcc-5, + # ubuntu-18.04-gcc-9, # TODO Disabled for now because of timeouts + ubuntu-18.04-clang-9, + macOS-10.15-xcode-11.3.1, + ubuntu-18.04-gcc-5-tbb, + ] + + build_type: [Debug, Release] + python_version: [3] + wrapper: [pybind] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + # TODO Disabled for now because of timeouts + # - name: ubuntu-18.04-gcc-9 + # os: ubuntu-18.04 + # compiler: gcc + # version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + - name: ubuntu-18.04-gcc-5-tbb + os: ubuntu-18.04 + compiler: gcc + version: "5" + flag: tbb + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. 
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + - name: Set GTSAM_WITH_TBB Flag + if: matrix.flag == 'tbb' + run: | + echo "::set-env name=GTSAM_WITH_TBB::ON" + echo "GTSAM Uses TBB" + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + bash .github/scripts/python.sh + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + bash .github/scripts/python.sh \ No newline at end of file diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml new file mode 100644 index 000000000..648365f24 --- /dev/null +++ b/.github/workflows/build-special.yml @@ -0,0 +1,112 @@ +name: Special Cases CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ 
matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ON + + strategy: + fail-fast: false + + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: + [ + ubuntu-gcc-deprecated, + ubuntu-gcc-quaternions, + ubuntu-gcc-tbb, + ] + + build_type: [Debug, Release] + + include: + - name: ubuntu-gcc-deprecated + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: deprecated + + - name: ubuntu-gcc-quaternions + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: quaternions + + - name: ubuntu-gcc-tbb + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: tbb + + steps: + - name: Checkout + uses: actions/checkout@master + + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy + + echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)" + echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)" + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo 
"::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + + - name: Set Allow Deprecated Flag + if: matrix.flag == 'deprecated' + run: | + echo "::set-env name=GTSAM_ALLOW_DEPRECATED_SINCE_V41::ON" + echo "Allow deprecated since version 4.1" + + - name: Set Use Quaternions Flag + if: matrix.flag == 'quaternions' + run: | + echo "::set-env name=GTSAM_USE_QUATERNIONS::ON" + echo "Use Quaternions for rotations" + + - name: Set GTSAM_WITH_TBB Flag + if: matrix.flag == 'tbb' + run: | + echo "::set-env name=GTSAM_WITH_TBB::ON" + echo "GTSAM Uses TBB" + + - name: Build & Test + run: | + bash .github/scripts/unix.sh -t diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml new file mode 100644 index 000000000..b3150a751 --- /dev/null +++ b/.github/workflows/build-windows.yml @@ -0,0 +1,75 @@ +name: Windows CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + windows-2016-cl, + windows-2019-cl, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: windows-2016-cl + os: windows-2016 + compiler: cl + + - name: windows-2019-cl + os: windows-2019 + compiler: cl + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Windows) + if: runner.os == 'Windows' + shell: powershell + run: | + Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh') + scoop install ninja --global + if ("${{ matrix.compiler }}".StartsWith("clang")) { + scoop install llvm --global + } + if ("${{ matrix.compiler }}" -eq "gcc") { + # Chocolatey GCC is broken on the windows-2019 image. + # See: https://github.com/DaanDeMeyer/doctest/runs/231595515 + # See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413 + scoop install gcc --global + echo "::set-env name=CC::gcc" + echo "::set-env name=CXX::g++" + } elseif ("${{ matrix.compiler }}" -eq "clang") { + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + } else { + echo "::set-env name=CC::${{ matrix.compiler }}" + echo "::set-env name=CXX::${{ matrix.compiler }}" + } + # Scoop modifies the PATH so we make the modified PATH global. + echo "::set-env name=PATH::$env:PATH" + - name: Build (Windows) + if: runner.os == 'Windows' + run: | + cmake -E remove_directory build + echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}" + cmake -B build -S . 
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib" + cmake --build build --config ${{ matrix.build_type }} --target gtsam + cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable + cmake --build build --config ${{ matrix.build_type }} --target wrap + cmake --build build --config ${{ matrix.build_type }} --target check.base + cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable + cmake --build build --config ${{ matrix.build_type }} --target check.linear \ No newline at end of file diff --git a/.github/workflows/trigger-python.yml b/.github/workflows/trigger-python.yml index 8fad9e7ca..94527e732 100644 --- a/.github/workflows/trigger-python.yml +++ b/.github/workflows/trigger-python.yml @@ -1,6 +1,9 @@ # This triggers Cython builds on `gtsam-manylinux-build` name: Trigger Python Builds -on: push +on: + push: + branches: + - develop jobs: triggerCython: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 1d89cac25..c2d6ce60f 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ cython/gtsam_wrapper.pxd /CMakeSettings.json # for QtCreator: CMakeLists.txt.user* +xcode/ diff --git a/.travis.python.sh b/.travis.python.sh deleted file mode 100644 index 99506e749..000000000 --- a/.travis.python.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -set -x -e - -if [ -z ${PYTHON_VERSION+x} ]; then - echo "Please provide the Python version to build against!" 
- exit 127 -fi - -PYTHON="python${PYTHON_VERSION}" - -if [[ $(uname) == "Darwin" ]]; then - brew install wget -else - # Install a system package required by our library - sudo apt-get install wget libicu-dev python3-pip python3-setuptools -fi - -CURRDIR=$(pwd) - -sudo $PYTHON -m pip install -r ./cython/requirements.txt - -mkdir $CURRDIR/build -cd $CURRDIR/build - -cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \ - -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \ - -DGTSAM_USE_QUATERNIONS=OFF \ - -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ - -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \ - -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \ - -DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install - -make -j$(nproc) install - -cd cython - -sudo $PYTHON setup.py install - -cd $CURRDIR/cython/gtsam/tests - -$PYTHON -m unittest discover \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 2069f48e0..000000000 --- a/.travis.yml +++ /dev/null @@ -1,140 +0,0 @@ -language: cpp -cache: ccache -dist: xenial - -addons: - apt: - sources: - - ubuntu-toolchain-r-test - - sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main' - key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - packages: - - g++-9 - - clang-9 - - build-essential pkg-config - - cmake - - python3-dev libpython-dev - - python3-numpy - - libboost-all-dev - -# before_install: - # - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi - -install: - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi - -# We first do the compile stage specified below, then the matrix expansion specified after. 
-stages: - - compile - - test - - special - -env: - global: - - MAKEFLAGS="-j3" - - CCACHE_SLOPPINESS=pch_defines,time_macros - -# Compile stage without building examples/tests to populate the caches. -jobs: -# -------- STAGE 1: COMPILE ----------- - include: -# on Mac, GCC - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Mac, CLANG - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, GCC - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, CLANG - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, with deprecated ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON - script: bash .travis.sh -b -# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests - - stage: special - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON - script: bash .travis.sh -t -# -------- STAGE 2: TESTS ----------- -# on Mac, GCC - - stage: test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: 
test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t -# on Linux, with quaternions ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON - script: bash .travis.sh -t - - stage: special - os: linux - compiler: gcc - env: PYTHON_VERSION=3 - script: bash .travis.python.sh - - stage: special - os: osx - compiler: clang - env: PYTHON_VERSION=3 - script: bash .travis.python.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index 61730d45a..c2918f1cb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,18 +65,20 @@ add_custom_target(uninstall # Configurable Options if(GTSAM_UNSTABLE_AVAILABLE) option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) endif() option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) -option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) -option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). 
Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) +option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) +option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) +option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) -option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF) option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) if(NOT MSVC AND NOT XCODE_VERSION) @@ -99,37 +101,40 @@ endif() # Options relating to MATLAB wrapper # TODO: Check for matlab mex binary before handling building of binaries option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) -option(GTSAM_INSTALL_CYTHON_TOOLBOX "Enable/Disable installation of Cython toolbox" OFF) -option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON) -set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper for (or Default)") +set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python 
to build the wrappers against.") # Check / set dependent variables for MATLAB wrapper -if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP") -endif() -if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES) - set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) -endif() -if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP) - message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP") +if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_TYPE_POSTFIXES) + set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) endif() if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") + message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") endif() -if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. 
Please turn one of the two options off.") -endif() +if(GTSAM_BUILD_PYTHON) + if(GTSAM_UNSTABLE_BUILD_PYTHON) + if (NOT GTSAM_BUILD_UNSTABLE) + message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.") + set(GTSAM_UNSTABLE_BUILD_PYTHON OFF) + endif() + endif() -if(GTSAM_INSTALL_CYTHON_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) - message(FATAL_ERROR "GTSAM_INSTALL_CYTHON_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the CYTHON toolbox cannot deal with this yet. Please turn one of the two options off.") + set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python") endif() # Flags for choosing default packaging tools set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") +if (CMAKE_GENERATOR STREQUAL "Ninja" AND + ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR + (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5))) + # Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support. + # Rationale in https://github.com/ninja-build/ninja/issues/814 + add_compile_options(-fdiagnostics-color=always) +endif() + ############################################################################### # Find boost @@ -138,18 +143,18 @@ set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") # Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT if(MSVC) - # By default, boost only builds static libraries on windows - set(Boost_USE_STATIC_LIBS ON) # only find static libs - # If we ever reset above on windows and, ... - # If we use Boost shared libs, disable auto linking. - # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. 
- if(NOT Boost_USE_STATIC_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) - endif() - # Virtual memory range for PCH exceeded on VS2015 - if(MSVC_VERSION LESS 1910) # older than VS2017 - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) - endif() + # By default, boost only builds static libraries on windows + set(Boost_USE_STATIC_LIBS ON) # only find static libs + # If we ever reset above on windows and, ... + # If we use Boost shared libs, disable auto linking. + # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. + if(NOT Boost_USE_STATIC_LIBS) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) + endif() + # Virtual memory range for PCH exceeded on VS2015 + if(MSVC_VERSION LESS 1910) # older than VS2017 + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) + endif() endif() # If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() @@ -157,7 +162,7 @@ endif() # See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 # if(MSVC AND BUILD_SHARED_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) endif() # Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. 
@@ -227,16 +232,16 @@ find_package(GooglePerfTools) ############################################################################### # Support ccache, if installed if(NOT MSVC AND NOT XCODE_VERSION) - find_program(CCACHE_FOUND ccache) - if(CCACHE_FOUND) - if(GTSAM_BUILD_WITH_CCACHE) - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) - else() - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") - endif() - endif(CCACHE_FOUND) + find_program(CCACHE_FOUND ccache) + if(CCACHE_FOUND) + if(GTSAM_BUILD_WITH_CCACHE) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) + else() + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") + endif() + endif(CCACHE_FOUND) endif() ############################################################################### @@ -280,74 +285,74 @@ option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) # Switch for using system Eigen or GTSAM-bundled Eigen if(GTSAM_USE_SYSTEM_EIGEN) - find_package(Eigen3 REQUIRED) + find_package(Eigen3 REQUIRED) - # Use generic Eigen include paths e.g. - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") + # Use generic Eigen include paths e.g. + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") - # check if MKL is also enabled - can have one or the other, but not both! - # Note: Eigen >= v3.2.5 includes our patches - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) - message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") - endif() + # check if MKL is also enabled - can have one or the other, but not both! 
+ # Note: Eigen >= v3.2.5 includes our patches + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) + message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") + endif() - # Check for Eigen version which doesn't work with MKL - # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) - message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") - endif() + # Check for Eigen version which doesn't work with MKL + # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) + message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") + endif() - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") else() - # Use bundled Eigen include path. - # Clear any variables set by FindEigen3 - if(EIGEN3_INCLUDE_DIR) - set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) - endif() + # Use bundled Eigen include path. 
+ # Clear any variables set by FindEigen3 + if(EIGEN3_INCLUDE_DIR) + set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) + endif() - # set full path to be used by external projects - # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") + # set full path to be used by external projects + # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") endif() # Detect Eigen version: set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") if (EXISTS ${EIGEN_VER_H}) - file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) + file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) - # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... + # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... 
- string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") + string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") - string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") + string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") - string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") + string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") - set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") + set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") - message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") + message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") else() - message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") + message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") endif () if (MSVC) - if (BUILD_SHARED_LIBS) - # mute eigen static assert to avoid errors in shared lib - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) - endif() - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen + if 
(BUILD_SHARED_LIBS) + # mute eigen static assert to avoid errors in shared lib + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) + endif() + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen endif() if (APPLE AND BUILD_SHARED_LIBS) - # Set the default install directory on macOS - set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") + # Set the default install directory on macOS + set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") endif() ############################################################################### @@ -356,42 +361,42 @@ endif() # Build list of possible allocators set(possible_allocators "") if(GTSAM_USE_TBB) - list(APPEND possible_allocators TBB) - set(preferred_allocator TBB) + list(APPEND possible_allocators TBB) + set(preferred_allocator TBB) else() - list(APPEND possible_allocators BoostPool STL) - set(preferred_allocator STL) + list(APPEND possible_allocators BoostPool STL) + set(preferred_allocator STL) endif() if(GOOGLE_PERFTOOLS_FOUND) - list(APPEND possible_allocators tcmalloc) + list(APPEND possible_allocators tcmalloc) endif() # Check if current allocator choice is valid and set cache option list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) if(allocator_valid EQUAL -1) - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) else() - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") endif() set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) # Define compile flags depending on allocator if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") - set(GTSAM_ALLOCATOR_BOOSTPOOL 1) + 
set(GTSAM_ALLOCATOR_BOOSTPOOL 1) elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") - set(GTSAM_ALLOCATOR_STL 1) + set(GTSAM_ALLOCATOR_STL 1) elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") - set(GTSAM_ALLOCATOR_TBB 1) + set(GTSAM_ALLOCATOR_TBB 1) elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") - set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator - list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") + set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator + list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") endif() if(MSVC) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code endif() # GCC 4.8+ complains about local typedefs which we use for shared_ptr etc. 
@@ -419,14 +424,11 @@ endif() # Build CppUnitLite add_subdirectory(CppUnitLite) -# Build wrap -if (GTSAM_BUILD_WRAP) - add_subdirectory(wrap) - # suppress warning of cython line being too long - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-misleading-indentation") - endif() -endif(GTSAM_BUILD_WRAP) +# This is the new wrapper +if(GTSAM_BUILD_PYTHON) + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake") + add_subdirectory(python) +endif() # Build GTSAM library add_subdirectory(gtsam) @@ -447,23 +449,9 @@ endif() # Matlab toolbox if (GTSAM_INSTALL_MATLAB_TOOLBOX) - add_subdirectory(matlab) + add_subdirectory(matlab) endif() -# Cython wrap -if (GTSAM_INSTALL_CYTHON_TOOLBOX) - set(GTSAM_INSTALL_CYTHON_TOOLBOX 1) - # Set up cache options - # Cython install path appended with Build type (e.g. cython, cythonDebug, etc). - # This does not override custom values set from the command line - set(GTSAM_CYTHON_INSTALL_PATH "${PROJECT_BINARY_DIR}/cython${GTSAM_BUILD_TAG}" CACHE PATH "Cython toolbox destination, blank defaults to PROJECT_BINARY_DIR/cython") - set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency) - add_subdirectory(cython ${GTSAM_CYTHON_INSTALL_PATH}) -else() - set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h -endif() - - # Install config and export files GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake) @@ -509,116 +497,116 @@ set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>= # Print configuration variables message(STATUS "===============================================================") message(STATUS "================ Configuration Options ======================") -message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}") -message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}") -message(STATUS " CMake 
version : ${CMAKE_VERSION}") -message(STATUS " CMake generator : ${CMAKE_GENERATOR}") -message(STATUS " CMake build tool : ${CMAKE_BUILD_TOOL}") +print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}") +print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}") +print_config("CMake version" "${CMAKE_VERSION}") +print_config("CMake generator" "${CMAKE_GENERATOR}") +print_config("CMake build tool" "${CMAKE_BUILD_TOOL}") message(STATUS "Build flags ") -print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ") -print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") +print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests") +print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'") +print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") if (DOXYGEN_FOUND) - print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ") + print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs") endif() -print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ") -print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ") +print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries") +print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name") if(GTSAM_UNSTABLE_AVAILABLE) - print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") + print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") + print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ") + print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable") endif() if(NOT MSVC AND NOT XCODE_VERSION) - print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") - message(STATUS " Build type : ${CMAKE_BUILD_TYPE}") - message(STATUS " C compilation flags : 
${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") - message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") + print_config("Build type" "${CMAKE_BUILD_TYPE}") + print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") endif() print_build_options_for_target(gtsam) -message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") +print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") if(GTSAM_USE_TBB) - message(STATUS " Use Intel TBB : Yes") + print_config("Use Intel TBB" "Yes") elseif(TBB_FOUND) - message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled") + print_config("Use Intel TBB" "TBB found but GTSAM_WITH_TBB is disabled") else() - message(STATUS " Use Intel TBB : TBB not found") + print_config("Use Intel TBB" "TBB not found") endif() if(GTSAM_USE_EIGEN_MKL) - message(STATUS " Eigen will use MKL : Yes") + print_config("Eigen will use MKL" "Yes") elseif(MKL_FOUND) - message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled") + print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled") else() - message(STATUS " Eigen will use MKL : MKL not found") + print_config("Eigen will use MKL" "MKL not found") endif() if(GTSAM_USE_EIGEN_MKL_OPENMP) - message(STATUS " Eigen will use MKL and OpenMP : Yes") + print_config("Eigen will use MKL and OpenMP" "Yes") elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") elseif(OPENMP_FOUND 
AND NOT MKL_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found") elseif(OPENMP_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") else() - message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found") + print_config("Eigen will use MKL and OpenMP" "OpenMP not found") endif() -message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}") +print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}") if(GTSAM_THROW_CHEIRALITY_EXCEPTION) - message(STATUS " Cheirality exceptions enabled : YES") + print_config("Cheirality exceptions enabled" "YES") else() - message(STATUS " Cheirality exceptions enabled : NO") + print_config("Cheirality exceptions enabled" "NO") endif() if(NOT MSVC AND NOT XCODE_VERSION) - if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) - message(STATUS " Build with ccache : Yes") - elseif(CCACHE_FOUND) - message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") - else() - message(STATUS " Build with ccache : No") - endif() + if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) + print_config("Build with ccache" "Yes") + elseif(CCACHE_FOUND) + print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") + else() + print_config("Build with ccache" "No") + endif() endif() -message(STATUS "Packaging flags ") -message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}") -message(STATUS " CPack Generator : ${CPACK_GENERATOR}") +message(STATUS "Packaging flags") +print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}") +print_config("CPack Generator" "${CPACK_GENERATOR}") message(STATUS "GTSAM flags ") -print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") 
-print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") -print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") -print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") -print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") -print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ") -print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") -print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") -print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ") +print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") +print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") +print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") +print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") +print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") +print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") +print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") -message(STATUS "MATLAB toolbox flags ") -print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") +message(STATUS "MATLAB toolbox flags") +print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) - message(STATUS " MATLAB root : ${MATLAB_ROOT}") - message(STATUS " MEX binary : ${MEX_COMMAND}") + print_config("MATLAB root" "${MATLAB_ROOT}") + print_config("MEX binary" "${MEX_COMMAND}") +endif() +message(STATUS "Python toolbox flags ") +print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ") +if(GTSAM_BUILD_PYTHON) + print_config("Python version" ${GTSAM_PYTHON_VERSION}) endif() -message(STATUS "Cython toolbox flags ") 
-print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ") -if(GTSAM_INSTALL_CYTHON_TOOLBOX) - message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}") -endif() message(STATUS "===============================================================") # Print warnings at the end if(GTSAM_WITH_TBB AND NOT TBB_FOUND) - message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") + message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") endif() if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND) - message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.") + message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.") endif() if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND) - message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.") + message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.") endif() # Include CPack *after* all flags diff --git a/LICENSE b/LICENSE index d828deb55..228a6b942 100644 --- a/LICENSE +++ b/LICENSE @@ -23,3 +23,5 @@ ordering library - Included unmodified in gtsam/3rdparty/metis - Licenced under Apache License v 2.0, provided in gtsam/3rdparty/metis/LICENSE.txt +- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK. 
+ - Licenced under MPL2, provided at https://github.com/yixuan/spectra diff --git a/README.md b/README.md index 0b3ecb11d..99903f0b9 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,11 @@ # README - Georgia Tech Smoothing and Mapping Library +**Important Note** + +As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features. + +However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`. + ## What is GTSAM? GTSAM is a C++ library that implements smoothing and @@ -7,13 +13,14 @@ mapping (SAM) in robotics and vision, using Factor Graphs and Bayes Networks as the underlying computing paradigm rather than sparse matrices. -| Platform | Build Status | -|:---------:|:-------------:| -| gcc/clang | [![Build Status](https://travis-ci.com/borglab/gtsam.svg?branch=develop)](https://travis-ci.com/borglab/gtsam/) | -| MSVC | [![Build status](https://ci.appveyor.com/api/projects/status/3enllitj52jsxwfg/branch/develop?svg=true)](https://ci.appveyor.com/project/dellaert/gtsam) | +| Platform | Compiler | Build Status | +|:------------:|:---------:|:-------------:| +| Ubuntu 18.04 | gcc/clang | ![Linux CI](https://github.com/borglab/gtsam/workflows/Linux%20CI/badge.svg) | +| macOS | clang | ![macOS CI](https://github.com/borglab/gtsam/workflows/macOS%20CI/badge.svg) | +| Windows | MSVC | ![Windows CI](https://github.com/borglab/gtsam/workflows/Windows%20CI/badge.svg) | -On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](##Wrappers). +On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](#wrappers). 
## Quickstart diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 3747354cf..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,33 +0,0 @@ -# version format -version: 4.0.3-{branch}-build{build} - -os: Visual Studio 2019 - -clone_folder: c:\projects\gtsam - -platform: x64 -configuration: Release - -environment: - CTEST_OUTPUT_ON_FAILURE: 1 - BOOST_ROOT: C:/Libraries/boost_1_71_0 - -build_script: - - cd c:\projects\gtsam\build - # As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just - # check that parts of GTSAM build correctly: - #- cmake --build . - - cmake --build . --config Release --target gtsam - - cmake --build . --config Release --target gtsam_unstable - - cmake --build . --config Release --target wrap - #- cmake --build . --target check - - cmake --build . --config Release --target check.base - - cmake --build . --config Release --target check.base_unstable - - cmake --build . --config Release --target check.linear - -before_build: - - cd c:\projects\gtsam - - mkdir build - - cd build - # Disable examples to avoid AppVeyor timeout - - cmake -G "Visual Studio 16 2019" .. -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index d612e2fae..9d9ecd48b 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt @@ -17,8 +17,6 @@ install(FILES GtsamBuildTypes.cmake GtsamMakeConfigFile.cmake GtsamMatlabWrap.cmake - GtsamPythonWrap.cmake - GtsamCythonWrap.cmake GtsamTesting.cmake GtsamPrinting.cmake FindCython.cmake diff --git a/cmake/GtsamCythonWrap.cmake b/cmake/GtsamCythonWrap.cmake deleted file mode 100644 index c8f876895..000000000 --- a/cmake/GtsamCythonWrap.cmake +++ /dev/null @@ -1,204 +0,0 @@ -# Check Cython version, need to be >=0.25.2 -# Unset these cached variables to avoid surprises when the python/cython -# in the current environment are different from the cached! 
-unset(PYTHON_EXECUTABLE CACHE) -unset(CYTHON_EXECUTABLE CACHE) -unset(PYTHON_INCLUDE_DIR CACHE) -unset(PYTHON_MAJOR_VERSION CACHE) -unset(PYTHON_LIBRARY CACHE) - -# Allow override from command line -if(NOT DEFINED GTSAM_USE_CUSTOM_PYTHON_LIBRARY) - if(GTSAM_PYTHON_VERSION STREQUAL "Default") - find_package(PythonInterp REQUIRED) - find_package(PythonLibs REQUIRED) - else() - find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) - find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) - endif() -endif() -find_package(Cython 0.25.2 REQUIRED) - -execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "from __future__ import print_function;import sys;print(sys.version[0], end='')" - OUTPUT_VARIABLE PYTHON_MAJOR_VERSION -) - -# User-friendly Cython wrapping and installing function. -# Builds a Cython module from the provided interface_header. -# For example, for the interface header gtsam.h, -# this will build the wrap module 'gtsam'. -# -# Arguments: -# -# interface_header: The relative path to the wrapper interface definition file. -# extra_imports: extra header to import in the Cython pxd file. 
-# For example, to use Cython gtsam.pxd in your own module, -# use "from gtsam cimport *" -# install_path: destination to install the library -# libs: libraries to link with -# dependencies: Dependencies which need to be built before the wrapper -function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies) - # Paths for generated files - get_filename_component(module_name "${interface_header}" NAME_WE) - set(generated_files_path "${install_path}") - wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}") -endfunction() - -function(set_up_required_cython_packages) - # Set up building of cython module - include_directories(${PYTHON_INCLUDE_DIRS}) - find_package(NumPy REQUIRED) - include_directories(${NUMPY_INCLUDE_DIRS}) -endfunction() - - -# Convert pyx to cpp by executing cython -# This is the first step to compile cython from the command line -# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# -# Arguments: -# - target: The specified target for this step -# - pyx_file: The input pyx_file in full *absolute* path -# - generated_cpp: The output cpp file in full absolute path -# - include_dirs: Directories to include when executing cython -function(pyx_to_cpp target pyx_file generated_cpp include_dirs) - foreach(dir ${include_dirs}) - set(includes_for_cython ${includes_for_cython} -I ${dir}) - endforeach() - - add_custom_command( - OUTPUT ${generated_cpp} - COMMAND - ${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp} - VERBATIM) - add_custom_target(${target} ALL DEPENDS ${generated_cpp}) -endfunction() - -# Build the cpp file generated by converting pyx using cython -# This is the second step to compile cython from the command line -# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# -# 
Arguments: -# - target: The specified target for this step -# - cpp_file: The input cpp_file in full *absolute* path -# - output_lib_we: The output lib filename only (without extension) -# - output_dir: The output directory -function(build_cythonized_cpp target cpp_file output_lib_we output_dir) - add_library(${target} MODULE ${cpp_file}) - - if(WIN32) - # Use .pyd extension instead of .dll on Windows - set_target_properties(${target} PROPERTIES SUFFIX ".pyd") - - # Add full path to the Python library - target_link_libraries(${target} ${PYTHON_LIBRARIES}) - endif() - - if(APPLE) - set(link_flags "-undefined dynamic_lookup") - endif() - set_target_properties(${target} - PROPERTIES COMPILE_FLAGS "-w" - LINK_FLAGS "${link_flags}" - OUTPUT_NAME ${output_lib_we} - PREFIX "" - ${CMAKE_BUILD_TYPE_UPPER}_POSTFIX "" - LIBRARY_OUTPUT_DIRECTORY ${output_dir}) -endfunction() - -# Cythonize a pyx from the command line as described at -# http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# Arguments: -# - target: The specified target -# - pyx_file: The input pyx_file in full *absolute* path -# - output_lib_we: The output lib filename only (without extension) -# - output_dir: The output directory -# - include_dirs: Directories to include when executing cython -# - libs: Libraries to link with -# - interface_header: For dependency. 
Any update in interface header will re-trigger cythonize -function(cythonize target pyx_file output_lib_we output_dir include_dirs libs interface_header dependencies) - get_filename_component(pyx_path "${pyx_file}" DIRECTORY) - get_filename_component(pyx_name "${pyx_file}" NAME_WE) - set(generated_cpp "${output_dir}/${pyx_name}.cpp") - - set_up_required_cython_packages() - pyx_to_cpp(${target}_pyx2cpp ${pyx_file} ${generated_cpp} "${include_dirs}") - - # Late dependency injection, to make sure this gets called whenever the interface header is updated - # See: https://stackoverflow.com/questions/40032593/cmake-does-not-rebuild-dependent-after-prerequisite-changes - add_custom_command(OUTPUT ${generated_cpp} DEPENDS ${interface_header} ${pyx_file} APPEND) - if (NOT "${dependencies}" STREQUAL "") - add_dependencies(${target}_pyx2cpp "${dependencies}") - endif() - - build_cythonized_cpp(${target} ${generated_cpp} ${output_lib_we} ${output_dir}) - if (NOT "${libs}" STREQUAL "") - target_link_libraries(${target} "${libs}") - endif() - add_dependencies(${target} ${target}_pyx2cpp) - - if(TARGET ${python_install_target}) - add_dependencies(${python_install_target} ${target}) - endif() -endfunction() - -# Internal function that wraps a library and compiles the wrapper -function(wrap_library_cython interface_header generated_files_path extra_imports libs dependencies) - # Wrap codegen interface - # Extract module path and name from interface header file name - # wrap requires interfacePath to be *absolute* - get_filename_component(interface_header "${interface_header}" ABSOLUTE) - get_filename_component(module_path "${interface_header}" PATH) - get_filename_component(module_name "${interface_header}" NAME_WE) - - # Wrap module to Cython pyx - message(STATUS "Cython wrapper generating ${generated_files_path}/${module_name}.pyx") - set(generated_pyx "${generated_files_path}/${module_name}.pyx") - if(NOT EXISTS ${generated_files_path}) - file(MAKE_DIRECTORY 
"${generated_files_path}") - endif() - - add_custom_command( - OUTPUT ${generated_pyx} - DEPENDS ${interface_header} wrap - COMMAND - wrap --cython ${module_path} ${module_name} ${generated_files_path} "${extra_imports}" - VERBATIM - WORKING_DIRECTORY ${generated_files_path}/../) - add_custom_target(cython_wrap_${module_name}_pyx ALL DEPENDS ${generated_pyx}) - if(NOT "${dependencies}" STREQUAL "") - add_dependencies(cython_wrap_${module_name}_pyx ${dependencies}) - endif() - - message(STATUS "Cythonize and build ${module_name}.pyx") - get_property(include_dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES) - cythonize(cythonize_${module_name} ${generated_pyx} ${module_name} - ${generated_files_path} "${include_dirs}" "${libs}" ${interface_header} cython_wrap_${module_name}_pyx) - - # distclean - add_custom_target(wrap_${module_name}_cython_distclean - COMMAND cmake -E remove_directory ${generated_files_path}) -endfunction() - -# Helper function to install Cython scripts and handle multiple build types where the scripts -# should be installed to all build type toolboxes -# -# Arguments: -# source_directory: The source directory to be installed. "The last component of each directory -# name is appended to the destination directory but a trailing slash may be -# used to avoid this because it leaves the last component empty." -# (https://cmake.org/cmake/help/v3.3/command/install.html?highlight=install#installing-directories) -# dest_directory: The destination directory to install to. 
-# patterns: list of file patterns to install -function(install_cython_scripts source_directory dest_directory patterns) - set(patterns_args "") - set(exclude_patterns "") - - foreach(pattern ${patterns}) - list(APPEND patterns_args PATTERN "${pattern}") - endforeach() - - file(COPY "${source_directory}" DESTINATION "${dest_directory}" - FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE) -endfunction() diff --git a/cmake/GtsamMatlabWrap.cmake b/cmake/GtsamMatlabWrap.cmake index 5fc829bf2..111114a7b 100644 --- a/cmake/GtsamMatlabWrap.cmake +++ b/cmake/GtsamMatlabWrap.cmake @@ -23,6 +23,11 @@ else() file(GLOB matlab_bin_directories "/usr/local/MATLAB/*/bin") set(mex_program_name "mex") endif() + +if(GTSAM_CUSTOM_MATLAB_PATH) + set(matlab_bin_directories ${GTSAM_CUSTOM_MATLAB_PATH}) +endif() + # Run find_program explicitly putting $PATH after our predefined program # directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents # finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is @@ -209,15 +214,34 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex # Set up generation of module source file file(MAKE_DIRECTORY "${generated_files_path}") + + if(GTSAM_PYTHON_VERSION STREQUAL "Default") + find_package(PythonInterp REQUIRED) + find_package(PythonLibs REQUIRED) + else() + find_package(PythonInterp + ${GTSAM_PYTHON_VERSION} + EXACT + REQUIRED) + find_package(PythonLibs + ${GTSAM_PYTHON_VERSION} + EXACT + REQUIRED) + endif() + + set(_ignore gtsam::Point2 + gtsam::Point3) add_custom_command( OUTPUT ${generated_cpp_file} - DEPENDS ${interfaceHeader} wrap ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} - COMMAND - wrap --matlab - ${modulePath} - ${moduleName} - ${generated_files_path} - ${matlab_h_path} + DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} + COMMAND + ${PYTHON_EXECUTABLE} + 
${CMAKE_SOURCE_DIR}/wrap/matlab_wrapper.py + --src ${interfaceHeader} + --module_name ${moduleName} + --out ${generated_files_path} + --top_module_namespaces ${moduleName} + --ignore ${_ignore} VERBATIM WORKING_DIRECTORY ${generated_files_path}) diff --git a/cmake/GtsamPrinting.cmake b/cmake/GtsamPrinting.cmake index e53f9c54f..5757f5ee1 100644 --- a/cmake/GtsamPrinting.cmake +++ b/cmake/GtsamPrinting.cmake @@ -1,14 +1,3 @@ -# print configuration variables -# Usage: -#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -function(print_config_flag flag msg) - if (flag) - message(STATUS " ${msg}: Enabled") - else () - message(STATUS " ${msg}: Disabled") - endif () -endfunction() - # Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake function(string_pad RESULT_NAME DESIRED_LENGTH VALUE) string(LENGTH "${VALUE}" VALUE_LENGTH) @@ -26,6 +15,27 @@ endfunction() set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.") mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH) +# print configuration variables with automatic padding +# Usage: +# print_config(${GTSAM_BUILD_TESTS} "Build Tests") +function(print_config config msg) + string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}") + message(STATUS "${padded_config}: ${msg}") +endfunction() + +# print configuration variable with enabled/disabled value +# Usage: +# print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests ") +function(print_enabled_config config msg) + string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}") + if (config) + message(STATUS "${padded_msg}: Enabled") + else () + message(STATUS "${padded_msg}: Disabled") + endif () +endfunction() + + # Print " var: ${var}" padding with spaces as needed function(print_padded variable_name) string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${variable_name}") diff --git a/cmake/GtsamPythonWrap.cmake b/cmake/GtsamPythonWrap.cmake 
deleted file mode 100644 index 714e37488..000000000 --- a/cmake/GtsamPythonWrap.cmake +++ /dev/null @@ -1,102 +0,0 @@ -#Setup cache options -set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to chose the default version") -set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Matlab PYTHON compilation") -set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python") -if(NOT GTSAM_PYTHON_INSTALL_PATH) - set(GTSAM_PYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/borg/python") -endif() - -#Author: Paul Furgale Modified by Andrew Melim -function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY) - # # Boost - # find_package(Boost COMPONENTS python filesystem system REQUIRED) - # include_directories(${Boost_INCLUDE_DIRS}) - - # # Find Python - # FIND_PACKAGE(PythonLibs 2.7 REQUIRED) - # INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS}) - - IF(APPLE) - # The apple framework headers don't include the numpy headers for some reason. - GET_FILENAME_COMPONENT(REAL_PYTHON_INCLUDE ${PYTHON_INCLUDE_DIRS} REALPATH) - IF( ${REAL_PYTHON_INCLUDE} MATCHES Python.framework) - message("Trying to find extra headers for numpy from ${REAL_PYTHON_INCLUDE}.") - message("Looking in ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy") - FIND_PATH(NUMPY_INCLUDE_DIR arrayobject.h - ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy - ${REAL_PYTHON_INCLUDE}/numpy - ) - IF(${NUMPY_INCLUDE_DIR} MATCHES NOTFOUND) - message("Unable to find numpy include directories: ${NUMPY_INCLUDE_DIR}") - ELSE() - message("Found headers at ${NUMPY_INCLUDE_DIR}") - INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}) - INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}/..) 
- ENDIF() - ENDIF() - ENDIF(APPLE) - - if(MSVC) - add_library(${moduleName}_python MODULE ${ARGN}) - set_target_properties(${moduleName}_python PROPERTIES - OUTPUT_NAME ${moduleName}_python - CLEAN_DIRECT_OUTPUT 1 - VERSION 1 - SOVERSION 0 - SUFFIX ".pyd") - target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp - - set(PYLIB_OUTPUT_FILE $) - message(${PYLIB_OUTPUT_FILE}) - get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) - set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd) - - ELSE() - # Create a shared library - add_library(${moduleName}_python SHARED ${generated_cpp_file}) - - set_target_properties(${moduleName}_python PROPERTIES - OUTPUT_NAME ${moduleName}_python - CLEAN_DIRECT_OUTPUT 1) - target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp - # On OSX and Linux, the python library must end in the extension .so. Build this - # filename here. - get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION) - set(PYLIB_OUTPUT_FILE $) - message(${PYLIB_OUTPUT_FILE}) - get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) - set(PYLIB_SO_NAME lib${moduleName}_python.so) - ENDIF(MSVC) - - # Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package - set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam) - # Cause the library to be output in the correct directory. - add_custom_command(TARGET ${moduleName}_python - POST_BUILD - COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - COMMENT "Copying library files to python directory" ) - - # Cause the library to be output in the correct directory. 
- add_custom_command(TARGET ${TARGET_NAME} - POST_BUILD - COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - COMMENT "Copying library files to python directory" ) - - get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES) - list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}) - set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}") -endfunction(wrap_python) - -# Macro to get list of subdirectories -macro(SUBDIRLIST result curdir) - file(GLOB children RELATIVE ${curdir} ${curdir}/*) - set(dirlist "") - foreach(child ${children}) - if(IS_DIRECTORY ${curdir}/${child}) - list(APPEND dirlist ${child}) - endif() - endforeach() - set(${result} ${dirlist}) -endmacro() diff --git a/cmake/dllexport.h.in b/cmake/dllexport.h.in index 9a0a344b7..7d757edea 100644 --- a/cmake/dllexport.h.in +++ b/cmake/dllexport.h.in @@ -47,9 +47,14 @@ # endif # endif #else +#ifdef __APPLE__ +# define @library_name@_EXPORT __attribute__((visibility("default"))) +# define @library_name@_EXTERN_EXPORT extern +#else # define @library_name@_EXPORT # define @library_name@_EXTERN_EXPORT extern #endif +#endif #undef BUILD_SHARED_LIBS diff --git a/cython/CMakeLists.txt b/cython/CMakeLists.txt deleted file mode 100644 index 221025575..000000000 --- a/cython/CMakeLists.txt +++ /dev/null @@ -1,57 +0,0 @@ -# Install cython components -include(GtsamCythonWrap) - -# Create the cython toolbox for the gtsam library -if (GTSAM_INSTALL_CYTHON_TOOLBOX) - # Add the new make target command - set(python_install_target python-install) - add_custom_target(${python_install_target} - COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_CYTHON_INSTALL_PATH}/setup.py install - WORKING_DIRECTORY ${GTSAM_CYTHON_INSTALL_PATH}) - - # build and include the eigency version of eigency - add_subdirectory(gtsam_eigency) - include_directories(${GTSAM_EIGENCY_INSTALL_PATH}) - - # Fix for error "C1128: number of sections exceeded object file format 
limit" - if(MSVC) - add_compile_options(/bigobj) - endif() - - # First set up all the package related files. - # This also ensures the below wrap operations work correctly. - set(CYTHON_INSTALL_REQUIREMENTS_FILE "${PROJECT_SOURCE_DIR}/cython/requirements.txt") - - # Install the custom-generated __init__.py - # This makes the cython (sub-)directories into python packages, so gtsam can be found while wrapping gtsam_unstable - configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam/__init__.py COPYONLY) - configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable/__init__.py COPYONLY) - configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${GTSAM_CYTHON_INSTALL_PATH}/setup.py) - - # Wrap gtsam - add_custom_target(gtsam_header DEPENDS "../gtsam.h") - wrap_and_install_library_cython("../gtsam.h" # interface_header - "" # extra imports - "${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path - gtsam # library to link with - "wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping - ) - add_dependencies(${python_install_target} gtsam gtsam_header) - - # Wrap gtsam_unstable - if(GTSAM_BUILD_UNSTABLE) - add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h") - wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header - "from gtsam.gtsam cimport *" # extra imports - "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path - gtsam_unstable # library to link with - "gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping - ) - add_dependencies(${python_install_target} gtsam_unstable gtsam_unstable_header) - endif() - - # install scripts and tests - install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py") - install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" 
"${GTSAM_CYTHON_INSTALL_PATH}" "*.py") - -endif () diff --git a/cython/README.md b/cython/README.md deleted file mode 100644 index f69b7a5a6..000000000 --- a/cython/README.md +++ /dev/null @@ -1,147 +0,0 @@ -# Python Wrapper - -This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code. - -## Requirements - -- If you want to build the GTSAM python library for a specific python version (eg 3.6), - use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used. -- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment), - then the environment should be active while building GTSAM. -- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows: - - ```bash - pip install -r /cython/requirements.txt - ``` - -- For compatibility with GTSAM's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git), - named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy. - -## Install - -- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `/cython` in Release mode and `/cython` for other modes. - -- Build GTSAM and the wrapper with `make`. - -- To install, simply run `make python-install`. - - The same command can be used to install into a virtual environment if it is active. - - **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory. - -- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly. 
- -## Unit Tests - -The Cython toolbox also has a small set of unit tests located in the -test directory. To run them: - - ```bash - cd - python -m unittest discover - ``` - -## Utils - -TODO - -## Examples - -TODO - -## Writing Your Own Scripts - -See the tests for examples. - -### Some Important Notes: - -- Vector/Matrix: - - - GTSAM expects double-precision floating point vectors and matrices. - Hence, you should pass numpy matrices with `dtype=float`, or `float64`. - - Also, GTSAM expects _column-major_ matrices, unlike the default storage - scheme in numpy. Hence, you should pass column-major matrices to GTSAM using - the flag order='F'. And you always get column-major matrices back. - For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed). - - Passing row-major matrices of different dtype, e.g. `int`, will also work - as the wrapper converts them to column-major and dtype float for you, - using numpy.array.astype(float, order='F', copy=False). - However, this will result a copy if your matrix is not in the expected type - and storage order. - -- Inner namespace: Classes in inner namespace will be prefixed by \_ in Python. - - Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey` - -- Casting from a base class to a derive class must be done explicitly. - - Examples: - - ```python - noiseBase = factor.noiseModel() - noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase) - ``` - -## Wrapping Custom GTSAM-based Project - -Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python). - -## KNOWN ISSUES - -- Doesn't work with python3 installed from homebrew - - size-related issue: can only wrap up to a certain number of classes: up to mEstimator! - - Guess: 64 vs 32b? disutils Compiler flags? 
-- Bug with Cython 0.24: instantiated factor classes return FastVector for keys(), which can't be casted to FastVector - - Upgrading to 0.25 solves the problem -- Need default constructor and default copy constructor for almost every classes... :( - - support these constructors by default and declare "delete" for special classes? - -### TODO - -- [ ] allow duplication of parent' functions in child classes. Not allowed for now due to conflicts in Cython. -- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in GTSAM?) -- [ ] inner namespaces ==> inner packages? -- [ ] Wrap fixed-size Matrices/Vectors? - -### Completed/Cancelled: - -- [x] Fix Python tests: don't use " import \* ": Bad style!!! (18-03-17 19:50) -- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files -- [x] Wrap unstable @done (18-03-17 15:30) -- [x] Unify cython/GTSAM.h and the original GTSAM.h @done (18-03-17 15:30) -- [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem. -- [x] 06-03-17: manage to remove the requirements for default and copy constructors -- [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector, to FastVector. -- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: `GTSAM::JointMarginal __pyx_t_1;` Users don't have to wrap this constructor, however. -- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! 
@done (15-03-17 13:00) -- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30) -- [x] CMake install script @done (25-11-16 02:30) -- [ ] [REFACTOR] better name for uninstantiateClass: very vague!! @cancelled (25-11-16 02:30) -- lazy -- [ ] forward declaration? @cancelled (23-11-16 13:00) - nothing to do, seem to work? -- [x] wrap VariableIndex: why is it in inference? If need to, shouldn't have constructors to specific FactorGraphs @done (23-11-16 13:00) -- [x] Global functions @done (22-11-16 21:00) -- [x] [REFACTOR] typesEqual --> isSameSignature @done (22-11-16 21:00) -- [x] Proper overloads (constructors, static methods, methods) @done (20-11-16 21:00) -- [x] Allow overloading methods. The current solution is annoying!!! @done (20-11-16 21:00) -- [x] Casting from parent and grandparents @done (16-11-16 17:00) -- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00) -- [x] Support "print obj" @done (16-11-16 17:00) -- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00) -- [x] Cython: Key and size_t: traits doesn't exist @done (16-09-12 18:34) -- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19) -- [x] [Nice to have] parse typedef @done (16-09-13 17:19) -- [x] ctypedef at correct places @done (16-09-12 18:34) -- [x] expand template variable type in constructor/static methods? @done (16-09-12 18:34) -- [x] NonlinearOptimizer: copy constructor deleted!!! 
@done (16-09-13 17:20) -- [x] Value: no default constructor @done (16-09-13 17:20) -- [x] ctypedef PriorFactor[Vector] PriorFactorVector @done (16-09-19 12:25) -- [x] Delete duplicate methods in derived class @done (16-09-12 13:38) -- [x] Fix return properly @done (16-09-11 17:14) -- [x] handle pair @done (16-09-11 17:14) -- [x] Eigency: ambiguous call: A(const T&) A(const Vector& v) and Eigency A(Map[Vector]& v) @done (16-09-11 07:59) -- [x] Eigency: Constructor: ambiguous construct from Vector/Matrix @done (16-09-11 07:59) -- [x] Eigency: Fix method template of Vector/Matrix: template argument is [Vector] while arugment is Map[Vector] @done (16-09-11 08:22) -- [x] Robust noise: copy assignment operator is deleted because of shared_ptr of the abstract Base class @done (16-09-10 09:05) -- [ ] Cython: Constructor: generate default constructor? (hack: if it's serializable?) @cancelled (16-09-13 17:20) -- [ ] Eigency: Map[] to Block @created(16-09-10 07:59) @cancelled (16-09-11 08:28) - -- inference before symbolic/linear -- what's the purpose of "virtual" ?? diff --git a/cython/gtsam/__init__.py b/cython/gtsam/__init__.py deleted file mode 100644 index d40ee4502..000000000 --- a/cython/gtsam/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -from .gtsam import * - -try: - import gtsam_unstable - - - def _deprecated_wrapper(item, name): - def wrapper(*args, **kwargs): - from warnings import warn - message = ('importing the unstable item "{}" directly from gtsam is deprecated. 
'.format(name) + - 'Please import it from gtsam_unstable.') - warn(message) - return item(*args, **kwargs) - return wrapper - - - for name in dir(gtsam_unstable): - if not name.startswith('__'): - item = getattr(gtsam_unstable, name) - if callable(item): - item = _deprecated_wrapper(item, name) - globals()[name] = item - -except ImportError: - pass - diff --git a/cython/gtsam/examples/ImuFactorExample.py b/cython/gtsam/examples/ImuFactorExample.py deleted file mode 100644 index 0e01766e7..000000000 --- a/cython/gtsam/examples/ImuFactorExample.py +++ /dev/null @@ -1,153 +0,0 @@ -""" -GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, -Atlanta, Georgia 30332-0415 -All Rights Reserved - -See LICENSE for the license information - -A script validating and demonstrating the ImuFactor inference. - -Author: Frank Dellaert, Varun Agrawal -""" - -from __future__ import print_function - -import math - -import gtsam -import matplotlib.pyplot as plt -import numpy as np -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X -from gtsam.utils.plot import plot_pose3 -from mpl_toolkits.mplot3d import Axes3D - -from PreintegrationExample import POSES_FIG, PreintegrationExample - -BIAS_KEY = B(0) - - -np.set_printoptions(precision=3, suppress=True) - - -class ImuFactorExample(PreintegrationExample): - - def __init__(self): - self.velocity = np.array([2, 0, 0]) - self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - - # Choose one of these twists to change scenario: - zero_twist = (np.zeros(3), np.zeros(3)) - forward_twist = (np.zeros(3), self.velocity) - loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity) - sick_twist = ( - np.array([math.radians(30), -math.radians(30), 0]), self.velocity) - - accBias = np.array([-0.3, 0.1, 0.2]) - gyroBias = np.array([0.1, 0.3, -0.1]) - bias = gtsam.imuBias_ConstantBias(accBias, 
gyroBias) - - dt = 1e-2 - super(ImuFactorExample, self).__init__(sick_twist, bias, dt) - - def addPrior(self, i, graph): - state = self.scenario.navState(i) - graph.push_back(gtsam.PriorFactorPose3( - X(i), state.pose(), self.priorNoise)) - graph.push_back(gtsam.PriorFactorVector( - V(i), state.velocity(), self.velNoise)) - - def run(self): - graph = gtsam.NonlinearFactorGraph() - - # initialize data structure for pre-integrated IMU measurements - pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) - - T = 12 - num_poses = T + 1 # assumes 1 factor per second - initial = gtsam.Values() - initial.insert(BIAS_KEY, self.actualBias) - for i in range(num_poses): - state_i = self.scenario.navState(float(i)) - - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - pose = state_i.pose().compose(poseNoise) - - velocity = state_i.velocity() + np.random.randn(3)*0.1 - - initial.insert(X(i), pose) - initial.insert(V(i), velocity) - - # simulate the loop - i = 0 # state index - actual_state_i = self.scenario.navState(0) - for k, t in enumerate(np.arange(0, T, self.dt)): - # get measurements and add them to PIM - measuredOmega = self.runner.measuredAngularVelocity(t) - measuredAcc = self.runner.measuredSpecificForce(t) - pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) - - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - - actual_state_i = gtsam.NavState( - actual_state_i.pose().compose(poseNoise), - actual_state_i.velocity() + np.random.randn(3)*0.1) - - # Plot IMU many times - if k % 10 == 0: - self.plotImu(t, measuredOmega, measuredAcc) - - # Plot every second - if k % int(1 / self.dt) == 0: - self.plotGroundTruthPose(t) - - # create IMU factor every second - if (k + 1) % int(1 / self.dt) == 0: - factor = gtsam.ImuFactor(X(i), V(i), X( - i + 1), V(i + 1), BIAS_KEY, pim) - graph.push_back(factor) - if True: - print(factor) - print(pim.predict(actual_state_i, self.actualBias)) - pim.resetIntegration() - actual_state_i = 
self.scenario.navState(t + self.dt) - i += 1 - - # add priors on beginning and end - self.addPrior(0, graph) - self.addPrior(num_poses - 1, graph) - - # optimize using Levenberg-Marquardt optimization - params = gtsam.LevenbergMarquardtParams() - params.setVerbosityLM("SUMMARY") - optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) - result = optimizer.optimize() - - # Calculate and print marginal covariances - marginals = gtsam.Marginals(graph, result) - print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY)) - for i in range(num_poses): - print("Covariance on pose {}:\n{}\n".format( - i, marginals.marginalCovariance(X(i)))) - print("Covariance on vel {}:\n{}\n".format( - i, marginals.marginalCovariance(V(i)))) - - # Plot resulting poses - i = 0 - while result.exists(X(i)): - pose_i = result.atPose3(X(i)) - plot_pose3(POSES_FIG, pose_i, 0.1) - i += 1 - - gtsam.utils.plot.set_axes_equal(POSES_FIG) - - print(result.atimuBias_ConstantBias(BIAS_KEY)) - - plt.ioff() - plt.show() - - -if __name__ == '__main__': - ImuFactorExample().run() diff --git a/cython/gtsam_eigency/CMakeLists.txt b/cython/gtsam_eigency/CMakeLists.txt deleted file mode 100644 index a0cf0fbde..000000000 --- a/cython/gtsam_eigency/CMakeLists.txt +++ /dev/null @@ -1,42 +0,0 @@ -include(GtsamCythonWrap) - -# Copy eigency's sources to the build folder -# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core -# and eigency's cython pxd headers can be found when cythonizing gtsam -file(COPY "." 
DESTINATION ".") -set(OUTPUT_DIR "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency") -set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR}) - -# This is to make the build/cython/gtsam_eigency folder a python package -configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py) - -# include eigency headers -include_directories(${EIGENCY_INCLUDE_DIR}) - -# Cythonize and build eigency -message(STATUS "Cythonize and build eigency") -# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is -# a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions() -# in conversions_api.h correctly! -cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions" - "${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "") -cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core" - ${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "") - -# Include Eigen headers: -target_include_directories(cythonize_eigency_conversions PUBLIC - $ - $ -) -target_include_directories(cythonize_eigency_core PUBLIC - $ - $ -) - -add_dependencies(cythonize_eigency_core cythonize_eigency_conversions) -add_custom_target(cythonize_eigency) -add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core) - -if(TARGET ${python_install_target}) - add_dependencies(${python_install_target} cythonize_eigency) -endif() diff --git a/cython/gtsam_eigency/LICENSE.txt b/cython/gtsam_eigency/LICENSE.txt deleted file mode 100644 index 71743c864..000000000 --- a/cython/gtsam_eigency/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 Wouter Boomsma - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit 
persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/cython/gtsam_eigency/__init__.py.in b/cython/gtsam_eigency/__init__.py.in deleted file mode 100644 index a59d51eab..000000000 --- a/cython/gtsam_eigency/__init__.py.in +++ /dev/null @@ -1,13 +0,0 @@ -import os -import numpy as np - -__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}" - -def get_includes(include_eigen=True): - root = os.path.dirname(__file__) - parent = os.path.join(root, "..") - path = [root, parent, np.get_include()] - if include_eigen: - path.append(os.path.join(root, __eigen_dir__)) - return path - diff --git a/cython/gtsam_eigency/conversions.pxd b/cython/gtsam_eigency/conversions.pxd deleted file mode 100644 index f4445e585..000000000 --- a/cython/gtsam_eigency/conversions.pxd +++ /dev/null @@ -1,62 +0,0 @@ -cimport numpy as np - -cdef api np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long outer_stride, long 
inner_stride) - -cdef api np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long outer_stride, long 
inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api 
np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] 
ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) - diff --git a/cython/gtsam_eigency/conversions.pyx b/cython/gtsam_eigency/conversions.pyx deleted file mode 100644 index 55c9ae0cd..000000000 --- a/cython/gtsam_eigency/conversions.pyx +++ /dev/null @@ -1,327 +0,0 @@ -cimport cython -import numpy as np -from numpy.lib.stride_tricks import as_strided - -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[:,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[::1,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[:,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[::1,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return 
np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[:,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[::1,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[:,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[::1,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[:,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) 
-@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[::1,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[:,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[::1,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const 
unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[:,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[::1,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[:,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[::1,:] mem_view = data - dtype 
= 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[:,:] mem_view = data - dtype = 'short' - cdef int itemsize = 
np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[::1,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[:,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[::1,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[:,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[::1,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, 
dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[:,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[::1,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[:,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[::1,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[:,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, 
dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[::1,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[:,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[::1,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[:,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[::1,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, 
dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[:,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[::1,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[:,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[::1,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[:,:] mem_view = data - dtype = 'complex64' - 
cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[::1,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[:,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[::1,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - diff --git a/cython/gtsam_eigency/core.pxd b/cython/gtsam_eigency/core.pxd deleted file mode 100644 index 9a84c3c16..000000000 --- a/cython/gtsam_eigency/core.pxd +++ /dev/null @@ -1,917 +0,0 @@ -cimport cython -cimport numpy as np - -ctypedef signed char schar; -ctypedef unsigned char uchar; - -ctypedef fused dtype: - uchar - schar - short - int - long - float - double - -ctypedef fused DenseType: - Matrix - Array - -ctypedef fused Rows: - _1 - _2 - _3 - _4 - _5 - _6 - _7 - _8 - _9 - _10 - _11 - _12 - _13 - _14 - _15 - _16 - _17 - _18 - _19 - _20 - _21 - _22 - _23 - _24 - 
_25 - _26 - _27 - _28 - _29 - _30 - _31 - _32 - Dynamic - -ctypedef Rows Cols -ctypedef Rows StrideOuter -ctypedef Rows StrideInner - -ctypedef fused DenseTypeShort: - Vector1i - Vector2i - Vector3i - Vector4i - VectorXi - RowVector1i - RowVector2i - RowVector3i - RowVector4i - RowVectorXi - Matrix1i - Matrix2i - Matrix3i - Matrix4i - MatrixXi - Vector1f - Vector2f - Vector3f - Vector4f - VectorXf - RowVector1f - RowVector2f - RowVector3f - RowVector4f - RowVectorXf - Matrix1f - Matrix2f - Matrix3f - Matrix4f - MatrixXf - Vector1d - Vector2d - Vector3d - Vector4d - VectorXd - RowVector1d - RowVector2d - RowVector3d - RowVector4d - RowVectorXd - Matrix1d - Matrix2d - Matrix3d - Matrix4d - MatrixXd - Vector1cf - Vector2cf - Vector3cf - Vector4cf - VectorXcf - RowVector1cf - RowVector2cf - RowVector3cf - RowVector4cf - RowVectorXcf - Matrix1cf - Matrix2cf - Matrix3cf - Matrix4cf - MatrixXcf - Vector1cd - Vector2cd - Vector3cd - Vector4cd - VectorXcd - RowVector1cd - RowVector2cd - RowVector3cd - RowVector4cd - RowVectorXcd - Matrix1cd - Matrix2cd - Matrix3cd - Matrix4cd - MatrixXcd - Array22i - Array23i - Array24i - Array2Xi - Array32i - Array33i - Array34i - Array3Xi - Array42i - Array43i - Array44i - Array4Xi - ArrayX2i - ArrayX3i - ArrayX4i - ArrayXXi - Array2i - Array3i - Array4i - ArrayXi - Array22f - Array23f - Array24f - Array2Xf - Array32f - Array33f - Array34f - Array3Xf - Array42f - Array43f - Array44f - Array4Xf - ArrayX2f - ArrayX3f - ArrayX4f - ArrayXXf - Array2f - Array3f - Array4f - ArrayXf - Array22d - Array23d - Array24d - Array2Xd - Array32d - Array33d - Array34d - Array3Xd - Array42d - Array43d - Array44d - Array4Xd - ArrayX2d - ArrayX3d - ArrayX4d - ArrayXXd - Array2d - Array3d - Array4d - ArrayXd - Array22cf - Array23cf - Array24cf - Array2Xcf - Array32cf - Array33cf - Array34cf - Array3Xcf - Array42cf - Array43cf - Array44cf - Array4Xcf - ArrayX2cf - ArrayX3cf - ArrayX4cf - ArrayXXcf - Array2cf - Array3cf - Array4cf - ArrayXcf - Array22cd - 
Array23cd - Array24cd - Array2Xcd - Array32cd - Array33cd - Array34cd - Array3Xcd - Array42cd - Array43cd - Array44cd - Array4Xcd - ArrayX2cd - ArrayX3cd - ArrayX4cd - ArrayXXcd - Array2cd - Array3cd - Array4cd - ArrayXcd - -ctypedef fused StorageOrder: - RowMajor - ColMajor - -ctypedef fused MapOptions: - Aligned - Unaligned - -cdef extern from "eigency_cpp.h" namespace "eigency": - - cdef cppclass _1 "1": - pass - - cdef cppclass _2 "2": - pass - - cdef cppclass _3 "3": - pass - - cdef cppclass _4 "4": - pass - - cdef cppclass _5 "5": - pass - - cdef cppclass _6 "6": - pass - - cdef cppclass _7 "7": - pass - - cdef cppclass _8 "8": - pass - - cdef cppclass _9 "9": - pass - - cdef cppclass _10 "10": - pass - - cdef cppclass _11 "11": - pass - - cdef cppclass _12 "12": - pass - - cdef cppclass _13 "13": - pass - - cdef cppclass _14 "14": - pass - - cdef cppclass _15 "15": - pass - - cdef cppclass _16 "16": - pass - - cdef cppclass _17 "17": - pass - - cdef cppclass _18 "18": - pass - - cdef cppclass _19 "19": - pass - - cdef cppclass _20 "20": - pass - - cdef cppclass _21 "21": - pass - - cdef cppclass _22 "22": - pass - - cdef cppclass _23 "23": - pass - - cdef cppclass _24 "24": - pass - - cdef cppclass _25 "25": - pass - - cdef cppclass _26 "26": - pass - - cdef cppclass _27 "27": - pass - - cdef cppclass _28 "28": - pass - - cdef cppclass _29 "29": - pass - - cdef cppclass _30 "30": - pass - - cdef cppclass _31 "31": - pass - - cdef cppclass _32 "32": - pass - - cdef cppclass PlainObjectBase: - pass - - cdef cppclass Map[DenseTypeShort](PlainObjectBase): - Map() except + - Map(np.ndarray array) except + - - cdef cppclass FlattenedMap[DenseType, dtype, Rows, Cols]: - FlattenedMap() except + - FlattenedMap(np.ndarray array) except + - - cdef cppclass FlattenedMapWithOrder "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder]: - FlattenedMapWithOrder() except + - FlattenedMapWithOrder(np.ndarray array) except + - - cdef cppclass 
FlattenedMapWithStride "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder, MapOptions, StrideOuter, StrideInner]: - FlattenedMapWithStride() except + - FlattenedMapWithStride(np.ndarray array) except + - - cdef np.ndarray ndarray_view(PlainObjectBase &) - cdef np.ndarray ndarray_copy(PlainObjectBase &) - cdef np.ndarray ndarray(PlainObjectBase &) - - -cdef extern from "eigency_cpp.h" namespace "Eigen": - - cdef cppclass Dynamic: - pass - - cdef cppclass RowMajor: - pass - - cdef cppclass ColMajor: - pass - - cdef cppclass Aligned: - pass - - cdef cppclass Unaligned: - pass - - cdef cppclass Matrix(PlainObjectBase): - pass - - cdef cppclass Array(PlainObjectBase): - pass - - cdef cppclass VectorXd(PlainObjectBase): - pass - - cdef cppclass Vector1i(PlainObjectBase): - pass - - cdef cppclass Vector2i(PlainObjectBase): - pass - - cdef cppclass Vector3i(PlainObjectBase): - pass - - cdef cppclass Vector4i(PlainObjectBase): - pass - - cdef cppclass VectorXi(PlainObjectBase): - pass - - cdef cppclass RowVector1i(PlainObjectBase): - pass - - cdef cppclass RowVector2i(PlainObjectBase): - pass - - cdef cppclass RowVector3i(PlainObjectBase): - pass - - cdef cppclass RowVector4i(PlainObjectBase): - pass - - cdef cppclass RowVectorXi(PlainObjectBase): - pass - - cdef cppclass Matrix1i(PlainObjectBase): - pass - - cdef cppclass Matrix2i(PlainObjectBase): - pass - - cdef cppclass Matrix3i(PlainObjectBase): - pass - - cdef cppclass Matrix4i(PlainObjectBase): - pass - - cdef cppclass MatrixXi(PlainObjectBase): - pass - - cdef cppclass Vector1f(PlainObjectBase): - pass - - cdef cppclass Vector2f(PlainObjectBase): - pass - - cdef cppclass Vector3f(PlainObjectBase): - pass - - cdef cppclass Vector4f(PlainObjectBase): - pass - - cdef cppclass VectorXf(PlainObjectBase): - pass - - cdef cppclass RowVector1f(PlainObjectBase): - pass - - cdef cppclass RowVector2f(PlainObjectBase): - pass - - cdef cppclass RowVector3f(PlainObjectBase): - pass - - cdef cppclass 
RowVector4f(PlainObjectBase): - pass - - cdef cppclass RowVectorXf(PlainObjectBase): - pass - - cdef cppclass Matrix1f(PlainObjectBase): - pass - - cdef cppclass Matrix2f(PlainObjectBase): - pass - - cdef cppclass Matrix3f(PlainObjectBase): - pass - - cdef cppclass Matrix4f(PlainObjectBase): - pass - - cdef cppclass MatrixXf(PlainObjectBase): - pass - - cdef cppclass Vector1d(PlainObjectBase): - pass - - cdef cppclass Vector2d(PlainObjectBase): - pass - - cdef cppclass Vector3d(PlainObjectBase): - pass - - cdef cppclass Vector4d(PlainObjectBase): - pass - - cdef cppclass VectorXd(PlainObjectBase): - pass - - cdef cppclass RowVector1d(PlainObjectBase): - pass - - cdef cppclass RowVector2d(PlainObjectBase): - pass - - cdef cppclass RowVector3d(PlainObjectBase): - pass - - cdef cppclass RowVector4d(PlainObjectBase): - pass - - cdef cppclass RowVectorXd(PlainObjectBase): - pass - - cdef cppclass Matrix1d(PlainObjectBase): - pass - - cdef cppclass Matrix2d(PlainObjectBase): - pass - - cdef cppclass Matrix3d(PlainObjectBase): - pass - - cdef cppclass Matrix4d(PlainObjectBase): - pass - - cdef cppclass MatrixXd(PlainObjectBase): - pass - - cdef cppclass Vector1cf(PlainObjectBase): - pass - - cdef cppclass Vector2cf(PlainObjectBase): - pass - - cdef cppclass Vector3cf(PlainObjectBase): - pass - - cdef cppclass Vector4cf(PlainObjectBase): - pass - - cdef cppclass VectorXcf(PlainObjectBase): - pass - - cdef cppclass RowVector1cf(PlainObjectBase): - pass - - cdef cppclass RowVector2cf(PlainObjectBase): - pass - - cdef cppclass RowVector3cf(PlainObjectBase): - pass - - cdef cppclass RowVector4cf(PlainObjectBase): - pass - - cdef cppclass RowVectorXcf(PlainObjectBase): - pass - - cdef cppclass Matrix1cf(PlainObjectBase): - pass - - cdef cppclass Matrix2cf(PlainObjectBase): - pass - - cdef cppclass Matrix3cf(PlainObjectBase): - pass - - cdef cppclass Matrix4cf(PlainObjectBase): - pass - - cdef cppclass MatrixXcf(PlainObjectBase): - pass - - cdef cppclass 
Vector1cd(PlainObjectBase): - pass - - cdef cppclass Vector2cd(PlainObjectBase): - pass - - cdef cppclass Vector3cd(PlainObjectBase): - pass - - cdef cppclass Vector4cd(PlainObjectBase): - pass - - cdef cppclass VectorXcd(PlainObjectBase): - pass - - cdef cppclass RowVector1cd(PlainObjectBase): - pass - - cdef cppclass RowVector2cd(PlainObjectBase): - pass - - cdef cppclass RowVector3cd(PlainObjectBase): - pass - - cdef cppclass RowVector4cd(PlainObjectBase): - pass - - cdef cppclass RowVectorXcd(PlainObjectBase): - pass - - cdef cppclass Matrix1cd(PlainObjectBase): - pass - - cdef cppclass Matrix2cd(PlainObjectBase): - pass - - cdef cppclass Matrix3cd(PlainObjectBase): - pass - - cdef cppclass Matrix4cd(PlainObjectBase): - pass - - cdef cppclass MatrixXcd(PlainObjectBase): - pass - - cdef cppclass Array22i(PlainObjectBase): - pass - - cdef cppclass Array23i(PlainObjectBase): - pass - - cdef cppclass Array24i(PlainObjectBase): - pass - - cdef cppclass Array2Xi(PlainObjectBase): - pass - - cdef cppclass Array32i(PlainObjectBase): - pass - - cdef cppclass Array33i(PlainObjectBase): - pass - - cdef cppclass Array34i(PlainObjectBase): - pass - - cdef cppclass Array3Xi(PlainObjectBase): - pass - - cdef cppclass Array42i(PlainObjectBase): - pass - - cdef cppclass Array43i(PlainObjectBase): - pass - - cdef cppclass Array44i(PlainObjectBase): - pass - - cdef cppclass Array4Xi(PlainObjectBase): - pass - - cdef cppclass ArrayX2i(PlainObjectBase): - pass - - cdef cppclass ArrayX3i(PlainObjectBase): - pass - - cdef cppclass ArrayX4i(PlainObjectBase): - pass - - cdef cppclass ArrayXXi(PlainObjectBase): - pass - - cdef cppclass Array2i(PlainObjectBase): - pass - - cdef cppclass Array3i(PlainObjectBase): - pass - - cdef cppclass Array4i(PlainObjectBase): - pass - - cdef cppclass ArrayXi(PlainObjectBase): - pass - - cdef cppclass Array22f(PlainObjectBase): - pass - - cdef cppclass Array23f(PlainObjectBase): - pass - - cdef cppclass Array24f(PlainObjectBase): - pass - - cdef 
cppclass Array2Xf(PlainObjectBase): - pass - - cdef cppclass Array32f(PlainObjectBase): - pass - - cdef cppclass Array33f(PlainObjectBase): - pass - - cdef cppclass Array34f(PlainObjectBase): - pass - - cdef cppclass Array3Xf(PlainObjectBase): - pass - - cdef cppclass Array42f(PlainObjectBase): - pass - - cdef cppclass Array43f(PlainObjectBase): - pass - - cdef cppclass Array44f(PlainObjectBase): - pass - - cdef cppclass Array4Xf(PlainObjectBase): - pass - - cdef cppclass ArrayX2f(PlainObjectBase): - pass - - cdef cppclass ArrayX3f(PlainObjectBase): - pass - - cdef cppclass ArrayX4f(PlainObjectBase): - pass - - cdef cppclass ArrayXXf(PlainObjectBase): - pass - - cdef cppclass Array2f(PlainObjectBase): - pass - - cdef cppclass Array3f(PlainObjectBase): - pass - - cdef cppclass Array4f(PlainObjectBase): - pass - - cdef cppclass ArrayXf(PlainObjectBase): - pass - - cdef cppclass Array22d(PlainObjectBase): - pass - - cdef cppclass Array23d(PlainObjectBase): - pass - - cdef cppclass Array24d(PlainObjectBase): - pass - - cdef cppclass Array2Xd(PlainObjectBase): - pass - - cdef cppclass Array32d(PlainObjectBase): - pass - - cdef cppclass Array33d(PlainObjectBase): - pass - - cdef cppclass Array34d(PlainObjectBase): - pass - - cdef cppclass Array3Xd(PlainObjectBase): - pass - - cdef cppclass Array42d(PlainObjectBase): - pass - - cdef cppclass Array43d(PlainObjectBase): - pass - - cdef cppclass Array44d(PlainObjectBase): - pass - - cdef cppclass Array4Xd(PlainObjectBase): - pass - - cdef cppclass ArrayX2d(PlainObjectBase): - pass - - cdef cppclass ArrayX3d(PlainObjectBase): - pass - - cdef cppclass ArrayX4d(PlainObjectBase): - pass - - cdef cppclass ArrayXXd(PlainObjectBase): - pass - - cdef cppclass Array2d(PlainObjectBase): - pass - - cdef cppclass Array3d(PlainObjectBase): - pass - - cdef cppclass Array4d(PlainObjectBase): - pass - - cdef cppclass ArrayXd(PlainObjectBase): - pass - - cdef cppclass Array22cf(PlainObjectBase): - pass - - cdef cppclass 
Array23cf(PlainObjectBase): - pass - - cdef cppclass Array24cf(PlainObjectBase): - pass - - cdef cppclass Array2Xcf(PlainObjectBase): - pass - - cdef cppclass Array32cf(PlainObjectBase): - pass - - cdef cppclass Array33cf(PlainObjectBase): - pass - - cdef cppclass Array34cf(PlainObjectBase): - pass - - cdef cppclass Array3Xcf(PlainObjectBase): - pass - - cdef cppclass Array42cf(PlainObjectBase): - pass - - cdef cppclass Array43cf(PlainObjectBase): - pass - - cdef cppclass Array44cf(PlainObjectBase): - pass - - cdef cppclass Array4Xcf(PlainObjectBase): - pass - - cdef cppclass ArrayX2cf(PlainObjectBase): - pass - - cdef cppclass ArrayX3cf(PlainObjectBase): - pass - - cdef cppclass ArrayX4cf(PlainObjectBase): - pass - - cdef cppclass ArrayXXcf(PlainObjectBase): - pass - - cdef cppclass Array2cf(PlainObjectBase): - pass - - cdef cppclass Array3cf(PlainObjectBase): - pass - - cdef cppclass Array4cf(PlainObjectBase): - pass - - cdef cppclass ArrayXcf(PlainObjectBase): - pass - - cdef cppclass Array22cd(PlainObjectBase): - pass - - cdef cppclass Array23cd(PlainObjectBase): - pass - - cdef cppclass Array24cd(PlainObjectBase): - pass - - cdef cppclass Array2Xcd(PlainObjectBase): - pass - - cdef cppclass Array32cd(PlainObjectBase): - pass - - cdef cppclass Array33cd(PlainObjectBase): - pass - - cdef cppclass Array34cd(PlainObjectBase): - pass - - cdef cppclass Array3Xcd(PlainObjectBase): - pass - - cdef cppclass Array42cd(PlainObjectBase): - pass - - cdef cppclass Array43cd(PlainObjectBase): - pass - - cdef cppclass Array44cd(PlainObjectBase): - pass - - cdef cppclass Array4Xcd(PlainObjectBase): - pass - - cdef cppclass ArrayX2cd(PlainObjectBase): - pass - - cdef cppclass ArrayX3cd(PlainObjectBase): - pass - - cdef cppclass ArrayX4cd(PlainObjectBase): - pass - - cdef cppclass ArrayXXcd(PlainObjectBase): - pass - - cdef cppclass Array2cd(PlainObjectBase): - pass - - cdef cppclass Array3cd(PlainObjectBase): - pass - - cdef cppclass Array4cd(PlainObjectBase): - pass - - cdef 
cppclass ArrayXcd(PlainObjectBase): - pass - - diff --git a/cython/gtsam_eigency/core.pyx b/cython/gtsam_eigency/core.pyx deleted file mode 100644 index 8b1378917..000000000 --- a/cython/gtsam_eigency/core.pyx +++ /dev/null @@ -1 +0,0 @@ - diff --git a/cython/gtsam_eigency/eigency_cpp.h b/cython/gtsam_eigency/eigency_cpp.h deleted file mode 100644 index ce303182e..000000000 --- a/cython/gtsam_eigency/eigency_cpp.h +++ /dev/null @@ -1,504 +0,0 @@ -#include - -#include -#include -#include - -typedef ::std::complex< double > __pyx_t_double_complex; -typedef ::std::complex< float > __pyx_t_float_complex; - -#include "conversions_api.h" - -#ifndef EIGENCY_CPP -#define EIGENCY_CPP - -namespace eigency { - -template -inline PyArrayObject *_ndarray_view(Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0); -template -inline PyArrayObject *_ndarray_copy(const Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0); - -// Strides: -// Eigen and numpy differ in their way of dealing with strides. Eigen has the concept of outer and -// inner strides, which are dependent on whether the array/matrix is row-major of column-major: -// Inner stride: denotes the offset between succeeding elements in each row (row-major) or column (column-major). -// Outer stride: denotes the offset between succeeding rows (row-major) or succeeding columns (column-major). -// In contrast, numpy's stride is simply a measure of how fast each dimension should be incremented. -// Consequently, a switch in numpy storage order from row-major to column-major involves a switch -// in strides, while it does not affect the stride in Eigen. 
-template<> -inline PyArrayObject *_ndarray_view(double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) { - // Eigen row-major mode: row_stride=outer_stride, and col_stride=inner_stride - // If no stride is given, the row_stride is set to the number of columns. - return ndarray_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - } else { - // Eigen column-major mode: row_stride=outer_stride, and col_stride=inner_stride - // If no stride is given, the cow_stride is set to the number of rows. - return ndarray_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); - } -} -template<> -inline PyArrayObject *_ndarray_copy(const double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(long *data, long rows, 
long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const int *data, long rows, 
long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned short 
*data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline 
PyArrayObject *_ndarray_copy(const unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return 
ndarray_copy_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - - -template -inline PyArrayObject *ndarray(Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -// If C++11 is available, check if m is an r-value reference, in -// which case a copy should always be made -#if __cplusplus >= 201103L -template -inline PyArrayObject *ndarray(Eigen::PlainObjectBase &&m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -#endif -template -inline PyArrayObject *ndarray(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_view(Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_view(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_copy(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} - -template -inline PyArrayObject *ndarray(Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - // Since this is a map, we assume that ownership is correctly taken care - // of, and we avoid taking a copy - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_view(Eigen::Map &m) { - 
import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_view(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_copy(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} - - -template > -class MapBase: public Eigen::Map { -public: - typedef Eigen::Map Base; - typedef typename Base::Scalar Scalar; - - MapBase(Scalar* data, - long rows, - long cols, - _StrideType stride=_StrideType()) - : Base(data, - // If both dimensions are dynamic or dimensions match, accept dimensions as they are - ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || - (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) - ? rows - // otherwise, test if swapping them makes them fit - : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) - ? cols - : rows), - ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || - (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) - ? cols - : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) - ? rows - : cols), - stride - ) {} - - MapBase &operator=(const MatrixType &other) { - Base::operator=(other); - return *this; - } - - virtual ~MapBase() { } -}; - - -template class EigencyDenseBase, - typename Scalar, - int _Rows, int _Cols, - int _Options = Eigen::AutoAlign | -#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4 - // workaround a bug in at least gcc 3.4.6 - // the innermost ?: ternary operator is misparsed. We write it slightly - // differently and this makes gcc 3.4.6 happy, but it's ugly. 
- // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined - // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor) - ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor -// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19 -#if EIGEN_VERSION_AT_LEAST(3,2,90) - : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION -#else - : !(_Cols==1 && _Rows!=1) ? Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION -#endif - : ColMajor ), -#else - ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor - : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor -// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19 -#if EIGEN_VERSION_AT_LEAST(3,2,90) - : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), -#else - : Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), -#endif -#endif - int _MapOptions = Eigen::Unaligned, - int _StrideOuter=0, int _StrideInner=0, - int _MaxRows = _Rows, - int _MaxCols = _Cols> -class FlattenedMap: public MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > { -public: - typedef MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > Base; - - FlattenedMap() - : Base(NULL, 0, 0), - object_(NULL) {} - - FlattenedMap(Scalar *data, long rows, long cols, long outer_stride=0, long inner_stride=0) - : Base(data, rows, cols, - Eigen::Stride<_StrideOuter, _StrideInner>(outer_stride, inner_stride)), - object_(NULL) { - } - - FlattenedMap(PyArrayObject *object) - : Base((Scalar *)((PyArrayObject*)object)->data, - // : Base(_from_numpy((PyArrayObject*)object), - (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1, - (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0], - Eigen::Stride<_StrideOuter, _StrideInner>(_StrideOuter != Eigen::Dynamic ? _StrideOuter : (((PyArrayObject*)object)->nd == 2) ? 
((PyArrayObject*)object)->dimensions[0] : 1, - _StrideInner != Eigen::Dynamic ? _StrideInner : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0])), - object_(object) { - - if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) - throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map."); - - Py_XINCREF(object_); - } - FlattenedMap &operator=(const FlattenedMap &other) { - if (other.object_) { - new (this) FlattenedMap(other.object_); - } else { - // Replace the memory that we point to (not a memory allocation) - new (this) FlattenedMap(const_cast(other.data()), - other.rows(), - other.cols(), - other.outerStride(), - other.innerStride()); - } - - return *this; - } - - operator Base() const { - return static_cast(*this); - } - - operator Base&() const { - return static_cast(*this); - } - - operator EigencyDenseBase() const { - return EigencyDenseBase(static_cast(*this)); - } - - virtual ~FlattenedMap() { - Py_XDECREF(object_); - } - -private: - PyArrayObject * const object_; -}; - - -template -class Map: public MapBase { -public: - typedef MapBase Base; - typedef typename MatrixType::Scalar Scalar; - - enum { - RowsAtCompileTime = Base::Base::RowsAtCompileTime, - ColsAtCompileTime = Base::Base::ColsAtCompileTime - }; - - Map() - : Base(NULL, - (RowsAtCompileTime == Eigen::Dynamic) ? 0 : RowsAtCompileTime, - (ColsAtCompileTime == Eigen::Dynamic) ? 0 : ColsAtCompileTime), - object_(NULL) { - } - - Map(Scalar *data, long rows, long cols) - : Base(data, rows, cols), - object_(NULL) {} - - Map(PyArrayObject *object) - : Base((PyObject*)object == Py_None? NULL: (Scalar *)object->data, - // ROW: If array is in row-major order, transpose (see README) - (PyObject*)object == Py_None? 0 : - (!PyArray_IS_F_CONTIGUOUS(object) - ? ((object->nd == 1) - ? 
1 // ROW: If 1D row-major numpy array, set to 1 (row vector) - : object->dimensions[1]) - : object->dimensions[0]), - // COLUMN: If array is in row-major order: transpose (see README) - (PyObject*)object == Py_None? 0 : - (!PyArray_IS_F_CONTIGUOUS(object) - ? object->dimensions[0] - : ((object->nd == 1) - ? 1 // COLUMN: If 1D col-major numpy array, set to length (column vector) - : object->dimensions[1]))), - object_(object) { - - if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) - throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map."); - Py_XINCREF(object_); - } - - Map &operator=(const Map &other) { - if (other.object_) { - new (this) Map(other.object_); - } else { - // Replace the memory that we point to (not a memory allocation) - new (this) Map(const_cast(other.data()), - other.rows(), - other.cols()); - } - - return *this; - } - - Map &operator=(const MatrixType &other) { - MapBase::operator=(other); - return *this; - } - - operator Base() const { - return static_cast(*this); - } - - operator Base&() const { - return static_cast(*this); - } - - operator MatrixType() const { - return MatrixType(static_cast(*this)); - } - - virtual ~Map() { - Py_XDECREF(object_); - } - -private: - PyArrayObject * const object_; -}; - - -} - -#endif - - - diff --git a/cython/requirements.txt b/cython/requirements.txt deleted file mode 100644 index 8d3c7aeb4..000000000 --- a/cython/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -Cython>=0.25.2 -backports_abc>=0.5 -numpy>=1.11.0 diff --git a/examples/Data/Klaus3.g2o b/examples/Data/Klaus3.g2o index 4c7233fa7..83a6e6fd2 100644 --- a/examples/Data/Klaus3.g2o +++ b/examples/Data/Klaus3.g2o @@ -1,6 +1,6 @@ -VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109 -VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 
0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809 -VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403 -EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063 +VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862 +VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738 +EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 
+EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 diff --git a/examples/Data/example_with_vertices.g2o b/examples/Data/example_with_vertices.g2o index 6c2f1a530..ca7cd86df 100644 --- a/examples/Data/example_with_vertices.g2o +++ b/examples/Data/example_with_vertices.g2o @@ -1,16 +1,16 @@ -VERTEX_SE3:QUAT 8646911284551352320 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 -VERTEX_SE3:QUAT 8646911284551352321 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 -VERTEX_SE3:QUAT 8646911284551352322 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 -VERTEX_SE3:QUAT 8646911284551352323 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 -VERTEX_SE3:QUAT 8646911284551352324 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162 -VERTEX_SE3:QUAT 8646911284551352325 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 -VERTEX_SE3:QUAT 8646911284551352326 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 -VERTEX_SE3:QUAT 8646911284551352327 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 -VERTEX_TRACKXYZ 7782220156096217088 10 10 10 -VERTEX_TRACKXYZ 7782220156096217089 -10 10 10 -VERTEX_TRACKXYZ 7782220156096217090 -10 -10 10 -VERTEX_TRACKXYZ 7782220156096217091 10 -10 10 -VERTEX_TRACKXYZ 7782220156096217092 10 10 -10 -VERTEX_TRACKXYZ 7782220156096217093 -10 10 -10 -VERTEX_TRACKXYZ 7782220156096217094 -10 -10 -10 -VERTEX_TRACKXYZ 7782220156096217095 10 -10 -10 +VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 +VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 +VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 +VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 +VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 
-0.557345 0.557345 -0.435162 0.435162 +VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 +VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 +VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 +VERTEX_TRACKXYZ 0 10 10 10 +VERTEX_TRACKXYZ 1 -10 10 10 +VERTEX_TRACKXYZ 2 -10 -10 10 +VERTEX_TRACKXYZ 3 10 -10 10 +VERTEX_TRACKXYZ 4 10 10 -10 +VERTEX_TRACKXYZ 5 -10 10 -10 +VERTEX_TRACKXYZ 6 -10 -10 -10 +VERTEX_TRACKXYZ 7 10 -10 -10 diff --git a/examples/IMUKittiExampleGPS.cpp b/examples/IMUKittiExampleGPS.cpp index f1d89b47a..e2ca49647 100644 --- a/examples/IMUKittiExampleGPS.cpp +++ b/examples/IMUKittiExampleGPS.cpp @@ -287,7 +287,7 @@ int main(int argc, char* argv[]) { new_values.insert(current_pose_key, gps_pose); printf("################ POSE INCLUDED AT TIME %lf ################\n", t); - gps_pose.translation().print(); + cout << gps_pose.translation(); printf("\n\n"); } else { new_values.insert(current_pose_key, current_pose_global); diff --git a/examples/ShonanAveragingCLI.cpp b/examples/ShonanAveragingCLI.cpp new file mode 100644 index 000000000..09221fda2 --- /dev/null +++ b/examples/ShonanAveragingCLI.cpp @@ -0,0 +1,125 @@ +/* ---------------------------------------------------------------------------- + +* GTSAM Copyright 2010, Georgia Tech Research Corporation, +* Atlanta, Georgia 30332-0415 +* All Rights Reserved +* Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + +* See LICENSE for the license information +* -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveragingCLI.cpp + * @brief Run Shonan Rotation Averaging Algorithm on a file or example dataset + * @author Frank Dellaert + * @date August, 2020 + * + * Example usage: + * + * Running without arguments will run on tiny 3D example pose3example-grid + * ./ShonanAveragingCLI + * + * Read 2D dataset w10000 (in examples/data) and output to w10000-rotations.g2o + * ./ShonanAveragingCLI -d 2 -n w10000 -o w10000-rotations.g2o + * + * Read 3D dataset sphere25000.txt and output to shonan.g2o (default) + * ./ShonanAveragingCLI -i spere2500.txt + * + */ + +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; +namespace po = boost::program_options; + +/* ************************************************************************* */ +int main(int argc, char* argv[]) { + string datasetName; + string inputFile; + string outputFile; + int d, seed; + po::options_description desc( + "Shonan Rotation Averaging CLI reads a *pose* graph, extracts the " + "rotation constraints, and runs the Shonan algorithm."); + desc.add_options()("help", "Print help message")( + "named_dataset,n", + po::value(&datasetName)->default_value("pose3example-grid"), + "Find and read frome example dataset file")( + "input_file,i", po::value(&inputFile)->default_value(""), + "Read pose constraints graph from the specified file")( + "output_file,o", + po::value(&outputFile)->default_value("shonan.g2o"), + "Write solution to the specified file")( + "dimension,d", po::value(&d)->default_value(3), + "Optimize over 2D or 3D rotations")( + "seed,s", po::value(&seed)->default_value(42), + "Random seed for initial estimate"); + po::variables_map vm; + po::store(po::command_line_parser(argc, argv).options(desc).run(), vm); + po::notify(vm); + + if (vm.count("help")) { + cout << desc << "\n"; 
+ return 1; + } + + // Get input file + if (inputFile.empty()) { + if (datasetName.empty()) { + cout << "You must either specify a named dataset or an input file\n" + << desc << endl; + return 1; + } + inputFile = findExampleDataFile(datasetName); + } + + // Seed random number generator + static std::mt19937 rng(seed); + + NonlinearFactorGraph::shared_ptr inputGraph; + Values::shared_ptr posesInFile; + Values poses; + if (d == 2) { + cout << "Running Shonan averaging for SO(2) on " << inputFile << endl; + ShonanAveraging2 shonan(inputFile); + auto initial = shonan.initializeRandomly(rng); + auto result = shonan.run(initial); + + // Parse file again to set up translation problem, adding a prior + boost::tie(inputGraph, posesInFile) = load2D(inputFile); + auto priorModel = noiseModel::Unit::Create(3); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + cout << "recovering 2D translations" << endl; + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + poses = initialize::computePoses(result.first, &poseGraph); + } else if (d == 3) { + cout << "Running Shonan averaging for SO(3) on " << inputFile << endl; + ShonanAveraging3 shonan(inputFile); + auto initial = shonan.initializeRandomly(rng); + auto result = shonan.run(initial); + + // Parse file again to set up translation problem, adding a prior + boost::tie(inputGraph, posesInFile) = load3D(inputFile); + auto priorModel = noiseModel::Unit::Create(6); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + cout << "recovering 3D translations" << endl; + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + poses = initialize::computePoses(result.first, &poseGraph); + } else { + cout << "Can only run SO(2) or SO(3) averaging\n" << desc << endl; + return 1; + } + cout << "Writing result to " << outputFile << endl; + writeG2o(NonlinearFactorGraph(), poses, outputFile); + return 0; +} + +/* ************************************************************************* */ diff --git 
a/gtsam/3rdparty/CMakeLists.txt b/gtsam/3rdparty/CMakeLists.txt index c8fecc339..8b356393b 100644 --- a/gtsam/3rdparty/CMakeLists.txt +++ b/gtsam/3rdparty/CMakeLists.txt @@ -1,7 +1,8 @@ -# install CCOLAMD headers +# install CCOLAMD and SuiteSparse_config headers install(FILES CCOLAMD/Include/ccolamd.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/CCOLAMD) install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/SuiteSparse_config) +# install Eigen unless System Eigen is used if(NOT GTSAM_USE_SYSTEM_EIGEN) # Find plain .h files file(GLOB_RECURSE eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/Eigen/*.h") diff --git a/gtsam/3rdparty/Spectra/GenEigsBase.h b/gtsam/3rdparty/Spectra/GenEigsBase.h new file mode 100644 index 000000000..e084033d7 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsBase.h @@ -0,0 +1,482 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_BASE_H +#define GEN_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::complex, std::conj, std::norm, std::abs +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/DoubleShiftQR.h" +#include "LinAlg/UpperHessenbergEigen.h" +#include "LinAlg/Arnoldi.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for general eigen solvers, mainly for internal use. +/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as GenEigsSolver and GenEigsRealShiftSolver. 
+/// +template +class GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef ArnoldiOp ArnoldiOpType; + typedef Arnoldi ArnoldiFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + ArnoldiFac m_fac; // Arnoldi factorization + + ComplexVector m_ritz_val; // Ritz values + ComplexMatrix m_ritz_vec; // Ritz vectors + ComplexVector m_ritz_est; // last row of m_ritz_vec + +private: + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part + // Complex Ritz values have exact conjugate pairs + // So we use exact tests here + static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); } + static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); } + + // Implicitly restarted Arnoldi factorization + void restart(Index k) + { + using std::norm; + + if (k >= m_ncv) + return; + + DoubleShiftQR 
decomp_ds(m_ncv); + UpperHessenbergQR decomp_hb(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1])) + { + // H - mu * I = Q1 * R1 + // H <- R1 * Q1 + mu * I = Q1' * H * Q1 + // H - conj(mu) * I = Q2 * R2 + // H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2 + // + // (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R + const Scalar s = Scalar(2) * m_ritz_val[i].real(); + const Scalar t = norm(m_ritz_val[i]); + + decomp_ds.compute(m_fac.matrix_H(), s, t); + + // Q -> Q * Qi + decomp_ds.apply_YQ(Q); + // H -> Q'HQ + // Matrix Q = Matrix::Identity(m_ncv, m_ncv); + // decomp_ds.apply_YQ(Q); + // m_fac_H = Q.transpose() * m_fac_H * Q; + m_fac.compress_H(decomp_ds); + + i++; + } + else + { + // QR decomposition of H - mu * I, mu is real + decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real()); + + // Q -> Q * Qi + decomp_hb.apply_YQ(Q); + // H -> Q'HQ = RQ + mu * I + m_fac.compress_H(decomp_hb); + } + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // Returns the adjusted nev for restarting + Index nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 3) + nev_new 
= 2; + + if (nev_new > m_ncv - 2) + nev_new = m_ncv - 2; + + // Increase nev by one if ritz_val[nev - 1] and + // ritz_val[nev] are conjugate pairs + if (is_complex(m_ritz_val[nev_new - 1]) && + is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new])) + { + nev_new++; + } + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + UpperHessenbergEigen decomp(m_fac.matrix_H()); + const ComplexVector& evals = decomp.eigenvalues(); + ComplexMatrix evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_MAGN: + break; + case LARGEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case LARGEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + ComplexVector new_ritz_val(m_ncv); + 
ComplexMatrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + + m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + +public: + /// \cond + + GenEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 2) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix"); + + if (ncv < nev + 2 || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~GenEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. + /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Arnoldi factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. 
+ /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. + /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_MAGN`, `Spectra::LARGEST_REAL`, + /// `Spectra::LARGEST_IMAG`, `Spectra::SMALLEST_MAGN`, + /// `Spectra::SMALLEST_REAL` and `Spectra::SMALLEST_IMAG`, + /// for example `LARGEST_MAGN` indicates that eigenvalues + /// with largest magnitude come first. + /// Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of GenEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_MAGN) + { + // The m-step Arnoldi factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. 
+ /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. + /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A complex-valued vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector, ...>`, depending on + /// the template parameter `Scalar` defined. + /// + ComplexVector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + ComplexVector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A complex-valued matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix, ...>`, + /// depending on the template parameter `Scalar` defined. + /// + ComplexMatrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + ComplexMatrix res(m_n, nvec); + + if (!nvec) + return res; + + ComplexMatrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. 
+ /// + ComplexMatrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h new file mode 100644 index 000000000..da7c1b422 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h @@ -0,0 +1,157 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_COMPLEX_SHIFT_SOLVER_H +#define GEN_EIGS_COMPLEX_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenComplexShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a complex shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the DenseGenComplexShiftSolve wrapper class, or define their +/// own that implements all the public member functions as in +/// DenseGenComplexShiftSolve. 
+/// +template > +class GenEigsComplexShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix ComplexVector; + + const Scalar m_sigmar; + const Scalar m_sigmai; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + using std::abs; + using std::sqrt; + using std::norm; + + // The eigenvalues we get from the iteration is + // nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma))) + // So the eigenvalues of the original problem is + // 1 \pm sqrt(1 - 4 * nu^2 * sigmai^2) + // lambda = sigmar + ----------------------------------- + // 2 * nu + // We need to pick the correct root + // Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj + // and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj + // where r is any shift value. + // We can use this identity to determine lambdaj + // + // op(v) computes Re(inv(A - r * I) * v) for any real v + // If r is real, then op(v) is also real. 
Let a = Re(vj), b = Im(vj), + // then op(vj) = op(a) + op(b) * i + // By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine + // which one is the correct root + + // Select a random shift value + SimpleRandom rng(0); + const Scalar shiftr = rng.random() * m_sigmar + rng.random(); + const Complex shift = Complex(shiftr, Scalar(0)); + this->m_op->set_shift(shiftr, Scalar(0)); + + // Calculate inv(A - r * I) * vj + Vector v_real(this->m_n), v_imag(this->m_n), OPv_real(this->m_n), OPv_imag(this->m_n); + const Scalar eps = Eigen::NumTraits::epsilon(); + for (Index i = 0; i < this->m_nev; i++) + { + v_real.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).real(); + v_imag.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).imag(); + this->m_op->perform_op(v_real.data(), OPv_real.data()); + this->m_op->perform_op(v_imag.data(), OPv_imag.data()); + + // Two roots computed from the quadratic equation + const Complex nu = this->m_ritz_val[i]; + const Complex root_part1 = m_sigmar + Scalar(0.5) / nu; + const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu; + const Complex root1 = root_part1 + root_part2; + const Complex root2 = root_part1 - root_part2; + + // Test roots + Scalar err1 = Scalar(0), err2 = Scalar(0); + for (int k = 0; k < this->m_n; k++) + { + const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift); + const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift); + const Complex OPv = Complex(OPv_real[k], OPv_imag[k]); + err1 += norm(OPv - rhs1); + err2 += norm(OPv - rhs2); + } + + const Complex lambdaj = (err1 < err2) ? 
root1 : root2; + this->m_ritz_val[i] = lambdaj; + + if (abs(Eigen::numext::imag(lambdaj)) > eps) + { + this->m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj); + i++; + } + else + { + this->m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0)); + } + } + + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. This class should implement + /// the complex shift-solve operation of \f$A\f$: calculating + /// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. Users could either + /// create the object from the DenseGenComplexShiftSolve wrapper class, or + /// define their own that implements all the public member functions + /// as in DenseGenComplexShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigmar The real part of the shift. + /// \param sigmai The imaginary part of the shift. 
+ /// + GenEigsComplexShiftSolver(OpType* op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) : + GenEigsBase(op, NULL, nev, ncv), + m_sigmar(sigmar), m_sigmai(sigmai) + { + this->m_op->set_shift(m_sigmar, m_sigmai); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_COMPLEX_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h new file mode 100644 index 000000000..6f28308f1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h @@ -0,0 +1,89 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_REAL_SHIFT_SOLVER_H +#define GEN_EIGS_REAL_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenRealShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a real shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenRealShiftSolve and +/// SparseGenRealShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseGenRealShiftSolve. 
+/// +template > +class GenEigsRealShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Array ComplexArray; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + // The eigenvalues we get from the iteration is nu = 1 / (lambda - sigma) + // So the eigenvalues of the original problem is lambda = 1 / nu + sigma + ComplexArray ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = ritz_val_org; + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. This class should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseGenRealShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseGenRealShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigma The real-valued shift. 
+ /// + GenEigsRealShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + GenEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_REAL_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsSolver.h b/gtsam/3rdparty/Spectra/GenEigsSolver.h new file mode 100644 index 000000000..3ead1dc4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsSolver.h @@ -0,0 +1,158 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_SOLVER_H +#define GEN_EIGS_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix. +/// +/// Most of the background information documented in the SymEigsSolver class +/// also applies to the GenEigsSolver class here, except that the eigenvalues +/// and eigenvectors of a general matrix can now be complex-valued. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenMatProd and +/// SparseGenMatProd, or define their +/// own that implements all the public member functions as in +/// DenseGenMatProd. 
+/// +/// An example that illustrates the usage of GenEigsSolver is give below: +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to calculate the eigenvalues of M +/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10); +/// +/// // Construct matrix operation object using the wrapper class +/// DenseGenMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest +/// // (in magnitude, or norm) three eigenvalues +/// GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXcd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +/// +/// And also an example for sparse matrices: +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal, +/// // and 3 on the above-main subdiagonal +/// const int n = 10; +/// Eigen::SparseMatrix M(n, n); +/// M.reserve(Eigen::VectorXi::Constant(n, 3)); +/// for(int i = 0; i < n; i++) +/// { +/// M.insert(i, i) = 1.0; +/// if(i > 0) +/// M.insert(i - 1, i) = 3.0; +/// if(i < n - 1) +/// M.insert(i + 1, i) = 2.0; +/// } +/// +/// // Construct matrix operation object using the wrapper class SparseGenMatProd +/// SparseGenMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest three eigenvalues +/// GenEigsSolver< double, LARGEST_MAGN, SparseGenMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// 
Eigen::VectorXcd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +template > +class GenEigsSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseGenMatProd, or + /// define their own that implements all the public member functions + /// as in DenseGenMatProd. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// + GenEigsSolver(OpType* op, Index nev, Index ncv) : + GenEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // GEN_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h new file mode 100644 index 000000000..730ba9556 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h @@ -0,0 +1,284 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef ARNOLDI_H +#define ARNOLDI_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "../MatOp/internal/ArnoldiOp.h" +#include "../Util/TypeTraits.h" +#include "../Util/SimpleRandom.h" +#include "UpperHessenbergQR.h" +#include "DoubleShiftQR.h" + +namespace Spectra { + +// Arnoldi factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + +protected: + // clang-format off + ArnoldiOpType m_op; // Operators for the Arnoldi factorization + + const Index m_n; // dimension of A + const Index m_m; // maximum dimension of subspace V + Index m_k; // current dimension of subspace V + + Matrix m_fac_V; // V matrix in the Arnoldi factorization + Matrix m_fac_H; // H matrix in the Arnoldi factorization + Vector m_fac_f; // residual in the Arnoldi factorization + Scalar m_beta; // ||f||, B-norm of f + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + // clang-format on + + // Given orthonormal basis functions V, find a nonzero vector f such that V'Bf = 0 + // Assume that f has been properly allocated + void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm) + { + using std::sqrt; + + const Scalar thresh = m_eps * sqrt(Scalar(m_n)); + Vector Vf(V.cols()); + for (Index iter = 0; iter < 5; iter++) + { + // Randomly generate a new vector and orthogonalize it against V + SimpleRandom rng(seed + 123 * iter); + f.noalias() = rng.random_vec(m_n); + // f <- f - V * 
V'Bf, so that f is orthogonal to V in B-norm + m_op.trans_product(V, f, Vf); + f.noalias() -= V * Vf; + // fnorm <- ||f|| + fnorm = m_op.norm(f); + + // If fnorm is too close to zero, we try a new random vector, + // otherwise return the result + if (fnorm >= thresh) + return; + } + } + +public: + Arnoldi(const ArnoldiOpType& op, Index m) : + m_op(op), m_n(op.rows()), m_m(m), m_k(0), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()) + {} + + virtual ~Arnoldi() {} + + // Const-reference to internal structures + const Matrix& matrix_V() const { return m_fac_V; } + const Matrix& matrix_H() const { return m_fac_H; } + const Vector& vector_f() const { return m_fac_f; } + Scalar f_norm() const { return m_beta; } + Index subspace_dim() const { return m_k; } + + // Initialize with an operator and an initial vector + void init(MapConstVec& v0, Index& op_counter) + { + m_fac_V.resize(m_n, m_m); + m_fac_H.resize(m_m, m_m); + m_fac_f.resize(m_n); + m_fac_H.setZero(); + + // Verify the initial vector + const Scalar v0norm = m_op.norm(v0); + if (v0norm < m_near_0) + throw std::invalid_argument("initial residual vector cannot be zero"); + + // Points to the first column of V + MapVec v(m_fac_V.data(), m_n); + + // Normalize + v.noalias() = v0 / v0norm; + + // Compute H and f + Vector w(m_n); + m_op.perform_op(v.data(), w.data()); + op_counter++; + + m_fac_H(0, 0) = m_op.inner_product(v, w); + m_fac_f.noalias() = w - v * m_fac_H(0, 0); + + // In some cases f is zero in exact arithmetics, but due to rounding errors + // it may contain tiny fluctuations. 
When this happens, we force f to be zero + if (m_fac_f.cwiseAbs().maxCoeff() < m_eps) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + } + else + { + m_beta = m_op.norm(m_fac_f); + } + + // Indicate that this is a step-1 factorization + m_k = 1; + } + + // Arnoldi factorization starting from step-k + virtual void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Arnoldi: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns + expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? 
Scalar(0) : m_beta; + + // w <- A * v, v = m_fac_V.col(i) + m_op.perform_op(&m_fac_V(0, i), w.data()); + op_counter++; + + const Index i1 = i + 1; + // First i+1 columns of V + MapConstMat Vs(m_fac_V.data(), m_n, i1); + // h = m_fac_H(0:i, i) + MapVec h(&m_fac_H(0, i), i1); + // h <- V'Bw + m_op.trans_product(Vs, w, h); + + // f <- w - V * h + m_fac_f.noalias() = w - Vs * h; + m_beta = m_op.norm(m_fac_f); + + if (m_beta > Scalar(0.717) * m_op.norm(h)) + continue; + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. + if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + h.noalias() += Vf.head(i1); + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a double shift QR decomposition + void compress_H(const DoubleShiftQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k -= 2; + } + + // Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition + void compress_H(const UpperHessenbergQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } + + // Apply V -> VQ and compute the new f. 
+ // Should be called after compress_H(), since m_k is updated there. + // Only need to update the first k+1 columns of V + // The first (m - k + i) elements of the i-th column of Q are non-zero, + // and the rest are zero + void compress_V(const Matrix& Q) + { + Matrix Vs(m_n, m_k + 1); + for (Index i = 0; i < m_k; i++) + { + const Index nnz = m_m - m_k + i + 1; + MapConstVec q(&Q(0, i), nnz); + Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q; + } + Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k); + m_fac_V.leftCols(m_k + 1).noalias() = Vs; + + Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1); + m_fac_f.swap(fk); + m_beta = m_op.norm(m_fac_f); + } +}; + +} // namespace Spectra + +#endif // ARNOLDI_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h new file mode 100644 index 000000000..2345e0fda --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h @@ -0,0 +1,530 @@ +// Copyright (C) 2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef BK_LDLT_H +#define BK_LDLT_H + +#include +#include +#include + +#include "../Util/CompInfo.h" + +namespace Spectra { + +// Bunch-Kaufman LDLT decomposition +// References: +// 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems. +// Mathematics of computation, 31(137), 163-179. +// 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4. +// 3. Bunch-Parlett diagonal pivoting +// 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers. +// SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561. 
+template +class BKLDLT +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef Eigen::Matrix IntVector; + typedef Eigen::Ref GenericVector; + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + typedef const Eigen::Ref ConstGenericVector; + + Index m_n; + Vector m_data; // storage for a lower-triangular matrix + std::vector m_colptr; // pointers to columns + IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5 + std::vector > m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)] + + bool m_computed; + int m_info; + + // Access to elements + // Pointer to the k-th column + Scalar* col_pointer(Index k) { return m_colptr[k]; } + // A[i, j] -> m_colptr[j][i - j], i >= j + Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; } + const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; } + // A[i, i] -> m_colptr[i][0] + Scalar& diag_coeff(Index i) { return m_colptr[i][0]; } + const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; } + + // Compute column pointers + void compute_pointer() + { + m_colptr.clear(); + m_colptr.reserve(m_n); + Scalar* head = m_data.data(); + + for (Index i = 0; i < m_n; i++) + { + m_colptr.push_back(head); + head += (m_n - i); + } + } + + // Copy mat - shift * I to m_data + void copy_data(ConstGenericMatrix& mat, int uplo, const Scalar& shift) + { + if (uplo == Eigen::Lower) + { + for (Index j = 0; j < m_n; j++) + { + const Scalar* begin = &mat.coeffRef(j, j); + const Index len = m_n - j; + std::copy(begin, begin + len, col_pointer(j)); + diag_coeff(j) -= shift; + } + } + else + { + Scalar* dest = m_data.data(); + for (Index i = 0; i < m_n; i++) + { + for (Index j = i; j < m_n; j++, dest++) + { + *dest = mat.coeff(i, j); + } + diag_coeff(i) -= shift; + } + } + } + + // Compute compressed permutations 
+ void compress_permutation() + { + for (Index i = 0; i < m_n; i++) + { + // Recover the permutation action + const Index perm = (m_perm[i] >= 0) ? (m_perm[i]) : (-m_perm[i] - 1); + if (perm != i) + m_permc.push_back(std::make_pair(i, perm)); + } + } + + // Working on the A[k:end, k:end] submatrix + // Exchange k <-> r + // Assume r >= k + void pivoting_1x1(Index k, Index r) + { + // No permutation + if (k == r) + { + m_perm[k] = r; + return; + } + + // A[k, k] <-> A[r, r] + std::swap(diag_coeff(k), diag_coeff(r)); + + // A[(r+1):end, k] <-> A[(r+1):end, r] + std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r)); + + // A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)] + Scalar* src = &coeff(k + 1, k); + for (Index j = k + 1; j < r; j++, src++) + { + std::swap(*src, coeff(r, j)); + } + + m_perm[k] = r; + } + + // Working on the A[k:end, k:end] submatrix + // Exchange [k+1, k] <-> [r, p] + // Assume p >= k, r >= k+1 + void pivoting_2x2(Index k, Index r, Index p) + { + pivoting_1x1(k, p); + pivoting_1x1(k + 1, r); + + // A[k+1, k] <-> A[r, k] + std::swap(coeff(k + 1, k), coeff(r, k)); + + // Use negative signs to indicate a 2x2 block + // Also minus one to distinguish a negative zero from a positive zero + m_perm[k] = -m_perm[k] - 1; + m_perm[k + 1] = -m_perm[k + 1] - 1; + } + + // A[r1, c1:c2] <-> A[r2, c1:c2] + // Assume r2 >= r1 > c2 >= c1 + void interchange_rows(Index r1, Index r2, Index c1, Index c2) + { + if (r1 == r2) + return; + + for (Index j = c1; j <= c2; j++) + { + std::swap(coeff(r1, j), coeff(r2, j)); + } + } + + // lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|} + // Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix + // r is the row index + // Assume k < end + Scalar find_lambda(Index k, Index& r) + { + using std::abs; + + const Scalar* head = col_pointer(k); // => A[k, k] + const Scalar* end = col_pointer(k + 1); + // Start with r=k+1, lambda=A[k+1, k] + r = k + 1; + Scalar lambda = 
abs(head[1]); + // Scan remaining elements + for (const Scalar* ptr = head + 2; ptr < end; ptr++) + { + const Scalar abs_elem = abs(*ptr); + if (lambda < abs_elem) + { + lambda = abs_elem; + r = k + (ptr - head); + } + } + + return lambda; + } + + // sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]} + // Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix + // p is the row index + // Assume k < r < end + Scalar find_sigma(Index k, Index r, Index& p) + { + using std::abs; + + // First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda() + // If r == end, we skip this search + Scalar sigma = Scalar(-1); + if (r < m_n - 1) + sigma = find_lambda(r, p); + + // Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1] + for (Index j = k; j < r; j++) + { + const Scalar abs_elem = abs(coeff(r, j)); + if (sigma < abs_elem) + { + sigma = abs_elem; + p = j; + } + } + + return sigma; + } + + // Generate permutations and apply to A + // Return true if the resulting pivoting is 1x1, and false if 2x2 + bool permutate_mat(Index k, const Scalar& alpha) + { + using std::abs; + + Index r = k, p = k; + const Scalar lambda = find_lambda(k, r); + + // If lambda=0, no need to interchange + if (lambda > Scalar(0)) + { + const Scalar abs_akk = abs(diag_coeff(k)); + // If |A[k, k]| >= alpha * lambda, no need to interchange + if (abs_akk < alpha * lambda) + { + const Scalar sigma = find_sigma(k, r, p); + + // If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange + if (sigma * abs_akk < alpha * lambda * lambda) + { + if (abs_akk >= alpha * sigma) + { + // Permutation on A + pivoting_1x1(k, r); + + // Permutation on L + interchange_rows(k, r, 0, k - 1); + return true; + } + else + { + // There are two versions of permutation here + // 1. A[k+1, k] <-> A[r, k] + // 2. 
A[k+1, k] <-> A[r, p], where p >= k and r >= k+1 + // + // Version 1 and 2 are used by Ref[1] and Ref[2], respectively + + // Version 1 implementation + p = k; + + // Version 2 implementation + // [r, p] and [p, r] are symmetric, but we need to make sure + // p >= k and r >= k+1, so it is safe to always make r > p + // One exception is when min{r,p} == k+1, in which case we make + // r = k+1, so that only one permutation needs to be performed + /* const Index rp_min = std::min(r, p); + const Index rp_max = std::max(r, p); + if(rp_min == k + 1) + { + r = rp_min; p = rp_max; + } else { + r = rp_max; p = rp_min; + } */ + + // Right now we use Version 1 since it reduces the overhead of interchange + + // Permutation on A + pivoting_2x2(k, r, p); + // Permutation on L + interchange_rows(k, p, 0, k - 1); + interchange_rows(k + 1, r, 0, k - 1); + return false; + } + } + } + } + + return true; + } + + // E = [e11, e12] + // [e21, e22] + // Overwrite E with inv(E) + void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const + { + // inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta + // [d21, d22] + const Scalar delta = e11 * e22 - e21 * e21; + std::swap(e11, e22); + e11 /= delta; + e22 /= delta; + e21 = -e21 / delta; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int gaussian_elimination_1x1(Index k) + { + // D = 1 / A[k, k] + const Scalar akk = diag_coeff(k); + // Return NUMERICAL_ISSUE if not invertible + if (akk == Scalar(0)) + return NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / akk; + + // B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k] + Scalar* lptr = col_pointer(k) + 1; + const Index ldim = m_n - k - 1; + MapVec l(lptr, ldim); + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j); + } + + // l /= A[k, k] + l /= akk; + + return SUCCESSFUL; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int 
gaussian_elimination_2x2(Index k) + { + // D = inv(E) + Scalar& e11 = diag_coeff(k); + Scalar& e21 = coeff(k + 1, k); + Scalar& e22 = diag_coeff(k + 1); + // Return NUMERICAL_ISSUE if not invertible + if (e11 * e22 - e21 * e21 == Scalar(0)) + return NUMERICAL_ISSUE; + + inverse_inplace_2x2(e11, e21, e22); + + // X = l * inv(E), l := L[(k+2):end, k:(k+1)] + Scalar* l1ptr = &coeff(k + 2, k); + Scalar* l2ptr = &coeff(k + 2, k + 1); + const Index ldim = m_n - k - 2; + MapVec l1(l1ptr, ldim), l2(l2ptr, ldim); + + Eigen::Matrix X(ldim, 2); + X.col(0).noalias() = l1 * e11 + l2 * e21; + X.col(1).noalias() = l1 * e21 + l2 * e22; + + // B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end] + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]); + } + + // l = X + l1.noalias() = X.col(0); + l2.noalias() = X.col(1); + + return SUCCESSFUL; + } + +public: + BKLDLT() : + m_n(0), m_computed(false), m_info(NOT_COMPUTED) + {} + + // Factorize mat - shift * I + BKLDLT(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), m_computed(false), m_info(NOT_COMPUTED) + { + compute(mat, uplo, shift); + } + + void compute(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("BKLDLT: matrix must be square"); + + m_perm.setLinSpaced(m_n, 0, m_n - 1); + m_permc.clear(); + + // Copy data + m_data.resize((m_n * (m_n + 1)) / 2); + compute_pointer(); + copy_data(mat, uplo, shift); + + const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0; + Index k = 0; + for (k = 0; k < m_n - 1; k++) + { + // 1. Interchange rows and columns of A, and save the result to m_perm + bool is_1x1 = permutate_mat(k, alpha); + + // 2. 
Gaussian elimination + if (is_1x1) + { + m_info = gaussian_elimination_1x1(k); + } + else + { + m_info = gaussian_elimination_2x2(k); + k++; + } + + // 3. Check status + if (m_info != SUCCESSFUL) + break; + } + // Invert the last 1x1 block if it exists + if (k == m_n - 1) + { + const Scalar akk = diag_coeff(k); + if (akk == Scalar(0)) + m_info = NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / diag_coeff(k); + } + + compress_permutation(); + + m_computed = true; + } + + // Solve Ax=b + void solve_inplace(GenericVector b) const + { + if (!m_computed) + throw std::logic_error("BKLDLT: need to call compute() first"); + + // PAP' = LDL' + // 1. b -> Pb + Scalar* x = b.data(); + MapVec res(x, m_n); + Index npermc = m_permc.size(); + for (Index i = 0; i < npermc; i++) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + + // 2. Lz = Pb + // If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2 + const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); + for (Index i = 0; i <= end; i++) + { + const Index b1size = m_n - i - 1; + const Index b2size = b1size - 1; + if (m_perm[i] >= 0) + { + MapConstVec l(&coeff(i + 1, i), b1size); + res.segment(i + 1, b1size).noalias() -= l * x[i]; + } + else + { + MapConstVec l1(&coeff(i + 2, i), b2size); + MapConstVec l2(&coeff(i + 2, i + 1), b2size); + res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]); + i++; + } + } + + // 3. Dw = z + for (Index i = 0; i < m_n; i++) + { + const Scalar e11 = diag_coeff(i); + if (m_perm[i] >= 0) + { + x[i] *= e11; + } + else + { + const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1); + const Scalar wi = x[i] * e11 + x[i + 1] * e21; + x[i + 1] = x[i] * e21 + x[i + 1] * e22; + x[i] = wi; + i++; + } + } + + // 4. L'y = w + // If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2 + Index i = (m_perm[m_n - 1] < 0) ? 
(m_n - 3) : (m_n - 2); + for (; i >= 0; i--) + { + const Index ldim = m_n - i - 1; + MapConstVec l(&coeff(i + 1, i), ldim); + x[i] -= res.segment(i + 1, ldim).dot(l); + + if (m_perm[i] < 0) + { + MapConstVec l2(&coeff(i + 1, i - 1), ldim); + x[i - 1] -= res.segment(i + 1, ldim).dot(l2); + i--; + } + } + + // 5. x = P'y + for (Index i = npermc - 1; i >= 0; i--) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + } + + Vector solve(ConstGenericVector& b) const + { + Vector res = b; + solve_inplace(res); + return res; + } + + int info() const { return m_info; } +}; + +} // namespace Spectra + +#endif // BK_LDLT_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h new file mode 100644 index 000000000..2845688b4 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h @@ -0,0 +1,384 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef DOUBLE_SHIFT_QR_H +#define DOUBLE_SHIFT_QR_H + +#include +#include // std::vector +#include // std::min, std::fill, std::copy +#include // std::abs, std::sqrt, std::pow +#include // std::invalid_argument, std::logic_error + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class DoubleShiftQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Matrix3X; + typedef Eigen::Matrix Vector; + typedef Eigen::Array IntArray; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; // Dimension of the matrix + Matrix m_mat_H; // A copy of the matrix to be factorized + Scalar m_shift_s; // Shift constant + Scalar m_shift_t; // Shift constant + Matrix3X m_ref_u; // Householder reflectors + IntArray m_ref_nr; // How many rows does each reflector affects + // 3 - A general reflector + // 2 - A Givens rotation + // 1 - An identity transformation + const Scalar m_near_0; // a very small value, but 1.0 / m_safe_min does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, + // e.g. 
~= 1e-16 for the "double" type + const Scalar m_eps_rel; + const Scalar m_eps_abs; + bool m_computed; // Whether matrix has been factorized + + void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind) + { + using std::abs; + + Scalar* u = &m_ref_u.coeffRef(0, ind); + unsigned char* nr = m_ref_nr.data(); + // In general case the reflector affects 3 rows + nr[ind] = 3; + Scalar x2x3 = Scalar(0); + // If x3 is zero, decrease nr by 1 + if (abs(x3) < m_near_0) + { + // If x2 is also zero, nr will be 1, and we can exit this function + if (abs(x2) < m_near_0) + { + nr[ind] = 1; + return; + } + else + { + nr[ind] = 2; + } + x2x3 = abs(x2); + } + else + { + x2x3 = Eigen::numext::hypot(x2, x3); + } + + // x1' = x1 - rho * ||x|| + // rho = -sign(x1), if x1 == 0, we choose rho = 1 + Scalar x1_new = x1 - ((x1 <= 0) - (x1 > 0)) * Eigen::numext::hypot(x1, x2x3); + Scalar x_norm = Eigen::numext::hypot(x1_new, x2x3); + // Double check the norm of new x + if (x_norm < m_near_0) + { + nr[ind] = 1; + return; + } + u[0] = x1_new / x_norm; + u[1] = x2 / x_norm; + u[2] = x3 / x_norm; + } + + void compute_reflector(const Scalar* x, Index ind) + { + compute_reflector(x[0], x[1], x[2], ind); + } + + // Update the block X = H(il:iu, il:iu) + void update_block(Index il, Index iu) + { + // Block size + const Index bsize = iu - il + 1; + + // If block size == 1, there is no need to apply reflectors + if (bsize == 1) + { + m_ref_nr.coeffRef(il) = 1; + return; + } + + const Scalar x00 = m_mat_H.coeff(il, il), + x01 = m_mat_H.coeff(il, il + 1), + x10 = m_mat_H.coeff(il + 1, il), + x11 = m_mat_H.coeff(il + 1, il + 1); + // m00 = x00 * (x00 - s) + x01 * x10 + t + const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t; + // m10 = x10 * (x00 + x11 - s) + const Scalar m10 = x10 * (x00 + x11 - m_shift_s); + + // For block size == 2, do a Givens rotation on M = X * X - s * X + t * I + if (bsize == 2) + { + // This causes nr=2 + compute_reflector(m00, m10, 0, 
il); + // Apply the reflector to X + apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il); + + m_ref_nr.coeffRef(il + 1) = 1; + return; + } + + // For block size >=3, use the regular strategy + // m20 = x21 * x10 + const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il); + compute_reflector(m00, m10, m20, il); + + // Apply the first reflector + apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + std::min(bsize, Index(4)), 3), m_n, il); + + // Calculate the following reflectors + // If entering this loop, block size is at least 4. + for (Index i = 1; i < bsize - 2; i++) + { + compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i); + // Apply the reflector to X + apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i); + apply_XP(m_mat_H.block(0, il + i, il + std::min(bsize, Index(i + 4)), 3), m_n, il + i); + } + + // The last reflector + // This causes nr=2 + compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1); + // Apply the reflector to X + apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1); + apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1); + + m_ref_nr.coeffRef(iu) = 1; + } + + // P = I - 2 * u * u' = P' + // PX = X - 2 * u * (u'X) + void apply_PX(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const Index nrow = X.rows(); + const Index ncol = X.cols(); + + Scalar* xptr = X.data(); + if (nr == 2 || nrow == 2) + { + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + } + } + else + { + const Scalar u2 = m_ref_u.coeff(2, u_ind); + 
const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + xptr[2] -= tmp * u2; + } + } + } + + // x is a pointer to a vector + // Px = x - 2 * dot(x, u) * u + void apply_PX(Scalar* x, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind), + u2 = m_ref_u.coeff(2, u_ind); + + // When the reflector only contains two elements, u2 has been set to zero + const bool nr_is_2 = (nr == 2); + const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 0 : (x[2] * u2))); + x[0] -= dot2 * u0; + x[1] -= dot2 * u1; + if (!nr_is_2) + x[2] -= dot2 * u2; + } + + // XP = X - 2 * (X * u) * u' + void apply_XP(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const int nrow = X.rows(); + const int ncol = X.cols(); + Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1) + + if (nr == 2 || ncol == 2) + { + // tmp = 2 * u0 * X0 + 2 * u1 * X1 + // X0 => X0 - u0 * tmp + // X1 => X1 - u1 * tmp + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + } + } + else + { + Scalar* X2 = X1 + stride; // X2 => X.col(2) + const Scalar u2 = m_ref_u.coeff(2, u_ind); + const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + X2[i] -= tmp * u2; + } + } + } + +public: + DoubleShiftQR(Index size) : + m_n(size), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + 
m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + {} + + DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) : + m_n(mat.rows()), + m_mat_H(m_n, m_n), + m_shift_s(s), + m_shift_t(t), + m_ref_u(3, m_n), + m_ref_nr(m_n), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + { + compute(mat, s, t); + } + + void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("DoubleShiftQR: matrix must be square"); + + m_mat_H.resize(m_n, m_n); + m_shift_s = s; + m_shift_t = t; + m_ref_u.resize(3, m_n); + m_ref_nr.resize(m_n); + + // Make a copy of mat + std::copy(mat.data(), mat.data() + mat.size(), m_mat_H.data()); + + // Obtain the indices of zero elements in the subdiagonal, + // so that H can be divided into several blocks + std::vector zero_ind; + zero_ind.reserve(m_n - 1); + zero_ind.push_back(0); + Scalar* Hii = m_mat_H.data(); + for (Index i = 0; i < m_n - 2; i++, Hii += (m_n + 1)) + { + // Hii[1] => m_mat_H(i + 1, i) + const Scalar h = abs(Hii[1]); + if (h <= 0 || h <= m_eps_rel * (abs(Hii[0]) + abs(Hii[m_n + 1]))) + { + Hii[1] = 0; + zero_ind.push_back(i + 1); + } + // Make sure m_mat_H is upper Hessenberg + // Zero the elements below m_mat_H(i + 1, i) + std::fill(Hii + 2, Hii + m_n - i, Scalar(0)); + } + zero_ind.push_back(m_n); + + for (std::vector::size_type i = 0; i < zero_ind.size() - 1; i++) + { + const Index start = zero_ind[i]; + const Index end = zero_ind[i + 1] - 1; + // Compute refelctors and update each block + update_block(start, end); + } + + m_computed = true; + } + + void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + dest.noalias() = m_mat_H; + } + + // Q = P0 * P1 * ... + // Q'y = P_{n-2} * ... 
* P1 * P0 * y + void apply_QtY(Vector& y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + Scalar* y_ptr = y.data(); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++, y_ptr++) + { + apply_PX(y_ptr, i); + } + } + + // Q = P0 * P1 * ... + // YQ = Y * P0 * P1 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + const Index nrow = Y.rows(); + const Index n2 = m_n - 2; + for (Index i = 0; i < n2; i++) + { + apply_XP(Y.block(0, i, nrow, 3), nrow, i); + } + apply_XP(Y.block(0, n2, nrow, 2), nrow, n2); + } +}; + +} // namespace Spectra + +#endif // DOUBLE_SHIFT_QR_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h new file mode 100644 index 000000000..53cf52be8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h @@ -0,0 +1,167 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef LANCZOS_H +#define LANCZOS_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "Arnoldi.h" + +namespace Spectra { + +// Lanczos factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Lanczos : public Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + + using Arnoldi::m_op; + using Arnoldi::m_n; + using Arnoldi::m_m; + using Arnoldi::m_k; + using Arnoldi::m_fac_V; + using Arnoldi::m_fac_H; + using Arnoldi::m_fac_f; + using Arnoldi::m_beta; + using Arnoldi::m_near_0; + using Arnoldi::m_eps; + +public: + Lanczos(const ArnoldiOpType& op, Index m) : + Arnoldi(op, m) + {} + + // Lanczos factorization starting from step-k + void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Lanczos: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i 
columns + this->expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column + v.noalias() = m_fac_f / m_beta; + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta; + + // w <- A * v + m_op.perform_op(v.data(), w.data()); + op_counter++; + + // H[i+1, i+1] = = v'Bw + m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry + m_fac_H(i, i) = m_op.inner_product(v, w); + + // f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1} + // If restarting, we know that H[i+1, i] = 0 + if (restart) + m_fac_f.noalias() = w - m_fac_H(i, i) * v; + else + m_fac_f.noalias() = w - m_fac_H(i, i - 1) * m_fac_V.col(i - 1) - m_fac_H(i, i) * v; + + m_beta = m_op.norm(m_fac_f); + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + const Index i1 = i + 1; + MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. 
+ if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + m_fac_H(i - 1, i) += Vf[i - 1]; + m_fac_H(i, i - 1) = m_fac_H(i - 1, i); + m_fac_H(i, i) += Vf[i]; + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition + void compress_H(const TridiagQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } +}; + +} // namespace Spectra + +#endif // LANCZOS_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h new file mode 100644 index 000000000..122e0e551 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h @@ -0,0 +1,219 @@ +// The code was adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2010 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef TRIDIAG_EIGEN_H +#define TRIDIAG_EIGEN_H + +#include +#include +#include + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class TridiagEigen +{ +private: + typedef Eigen::Index Index; + // For convenience in adapting the tridiagonal_qr_step() function + typedef Scalar RealScalar; + + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; + Vector m_main_diag; // Main diagonal elements of the matrix + Vector m_sub_diag; // Sub-diagonal elements of the matrix + Matrix m_evecs; // To store eigenvectors + + bool m_computed; + const Scalar m_near_0; // a very small value, ~= 1e-307 for the "double" type + + // Adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h + static void tridiagonal_qr_step(RealScalar* diag, + RealScalar* subdiag, Index start, + Index end, Scalar* matrixQ, + Index n) + { + using std::abs; + + RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5); + RealScalar e = subdiag[end - 1]; + // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still + // underflow thus leading to inf/NaN values when using the following commented code: + // RealScalar e2 = numext::abs2(subdiag[end-1]); + // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); + // This explain the following, somewhat more complicated, version: + RealScalar mu = diag[end]; + if (td == Scalar(0)) + mu -= abs(e); + else + { + RealScalar e2 = Eigen::numext::abs2(subdiag[end - 1]); + RealScalar h = Eigen::numext::hypot(td, e); + if (e2 == RealScalar(0)) + mu -= (e / (td + (td > RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h); + else + mu -= e2 / (td + (td > RealScalar(0) ? 
h : -h)); + } + + RealScalar x = diag[start] - mu; + RealScalar z = subdiag[start]; + Eigen::Map q(matrixQ, n, n); + for (Index k = start; k < end; ++k) + { + Eigen::JacobiRotation rot; + rot.makeGivens(x, z); + + const RealScalar s = rot.s(); + const RealScalar c = rot.c(); + + // do T = G' T G + RealScalar sdk = s * diag[k] + c * subdiag[k]; + RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1]; + + diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]); + diag[k + 1] = s * sdk + c * dkp1; + subdiag[k] = c * sdk - s * dkp1; + + if (k > start) + subdiag[k - 1] = c * subdiag[k - 1] - s * z; + + x = subdiag[k]; + + if (k < end - 1) + { + z = -s * subdiag[k + 1]; + subdiag[k + 1] = c * subdiag[k + 1]; + } + + // apply the givens rotation to the unit matrix Q = Q * G + if (matrixQ) + q.applyOnTheRight(k, k + 1, rot); + } + } + +public: + TridiagEigen() : + m_n(0), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + {} + + TridiagEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("TridiagEigen: matrix must be square"); + + m_main_diag.resize(m_n); + m_sub_diag.resize(m_n - 1); + m_evecs.resize(m_n, m_n); + m_evecs.setIdentity(); + + // Scale matrix to improve stability + const Scalar scale = std::max(mat.diagonal().cwiseAbs().maxCoeff(), + mat.diagonal(-1).cwiseAbs().maxCoeff()); + // If scale=0, mat is a zero matrix, so we can early stop + if (scale < m_near_0) + { + // m_main_diag contains eigenvalues + m_main_diag.setZero(); + // m_evecs has been set identity + // m_evecs.setIdentity(); + m_computed = true; + return; + } + m_main_diag.noalias() = mat.diagonal() / scale; + m_sub_diag.noalias() = mat.diagonal(-1) / scale; + + Scalar* diag = m_main_diag.data(); + Scalar* subdiag = m_sub_diag.data(); + + 
Index end = m_n - 1; + Index start = 0; + Index iter = 0; // total number of iterations + int info = 0; // 0 for success, 1 for failure + + const Scalar considerAsZero = TypeTraits::min(); + const Scalar precision = Scalar(2) * Eigen::NumTraits::epsilon(); + + while (end > 0) + { + for (Index i = start; i < end; i++) + if (abs(subdiag[i]) <= considerAsZero || + abs(subdiag[i]) <= (abs(diag[i]) + abs(diag[i + 1])) * precision) + subdiag[i] = 0; + + // find the largest unreduced block + while (end > 0 && subdiag[end - 1] == Scalar(0)) + end--; + + if (end <= 0) + break; + + // if we spent too many iterations, we give up + iter++; + if (iter > 30 * m_n) + { + info = 1; + break; + } + + start = end - 1; + while (start > 0 && subdiag[start - 1] != Scalar(0)) + start--; + + tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n); + } + + if (info > 0) + throw std::runtime_error("TridiagEigen: eigen decomposition failed"); + + // Scale eigenvalues back + m_main_diag *= scale; + + m_computed = true; + } + + const Vector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + // After calling compute(), main_diag will contain the eigenvalues. 
+ return m_main_diag; + } + + const Matrix& eigenvectors() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + return m_evecs; + } +}; + +} // namespace Spectra + +#endif // TRIDIAG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h new file mode 100644 index 000000000..4865e9db8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h @@ -0,0 +1,319 @@ +// The code was adapted from Eigen/src/Eigenvaleus/EigenSolver.h +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010,2012 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_EIGEN_H +#define UPPER_HESSENBERG_EIGEN_H + +#include +#include +#include + +namespace Spectra { + +template +class UpperHessenbergEigen +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + Index m_n; // Size of the matrix + Eigen::RealSchur m_realSchur; // Schur decomposition solver + Matrix m_matT; // Schur T matrix + Matrix m_eivec; // Storing eigenvectors + ComplexVector m_eivalues; // Eigenvalues + + bool m_computed; + + void doComputeEigenvectors() + { + using std::abs; + + const Index size = m_eivec.cols(); + const Scalar eps = Eigen::NumTraits::epsilon(); + + // inefficient! 
this is already computed in RealSchur + Scalar norm(0); + for (Index j = 0; j < size; ++j) + { + norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum(); + } + + // Backsubstitute to find vectors of upper triangular form + if (norm == Scalar(0)) + return; + + for (Index n = size - 1; n >= 0; n--) + { + Scalar p = m_eivalues.coeff(n).real(); + Scalar q = m_eivalues.coeff(n).imag(); + + // Scalar vector + if (q == Scalar(0)) + { + Scalar lastr(0), lastw(0); + Index l = n; + + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 1; i >= 0; i--) + { + Scalar w = m_matT.coeff(i, i) - p; + Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastr = r; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + if (w != Scalar(0)) + m_matT.coeffRef(i, n) = -r / w; + else + m_matT.coeffRef(i, n) = -r / (eps * norm); + } + else // Solve real equations + { + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); + Scalar t = (x * lastr - lastw * r) / denom; + m_matT.coeffRef(i, n) = t; + if (abs(x) > abs(lastw)) + m_matT.coeffRef(i + 1, n) = (-r - w * t) / x; + else + m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw; + } + + // Overflow control + Scalar t = abs(m_matT.coeff(i, n)); + if ((eps * t) * t > Scalar(1)) + m_matT.col(n).tail(size - i) /= t; + } + } + } + else if (q < Scalar(0) && n > 0) + { // Complex vector + Scalar lastra(0), lastsa(0), lastw(0); + Index l = n - 1; + + // Last vector component imaginary so matrix is triangular + if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n))) + { + m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1); + m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / 
m_matT.coeff(n, n - 1); + } + else + { + Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q); + m_matT.coeffRef(n - 1, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc); + } + m_matT.coeffRef(n, n - 1) = Scalar(0); + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 2; i >= 0; i--) + { + Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1)); + Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + Scalar w = m_matT.coeff(i, i) - p; + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastra = ra; + lastsa = sa; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + Complex cc = Complex(-ra, -sa) / Complex(w, q); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + } + else + { + // Solve complex equations + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; + Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; + if ((vr == Scalar(0)) && (vi == Scalar(0))) + vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); + + Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + if (abs(x) > (abs(lastw) + abs(q))) + { + m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x; + m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x; + } + else + { + cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q); + m_matT.coeffRef(i + 1, n - 1) = 
Eigen::numext::real(cc); + m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc); + } + } + + // Overflow control + Scalar t = std::max(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n))); + if ((eps * t) * t > Scalar(1)) + m_matT.block(i, n - 1, size - i, 2) /= t; + } + } + + // We handled a pair of complex conjugate eigenvalues, so need to skip them both + n--; + } + } + + // Back transformation to get eigenvectors of original matrix + Vector m_tmp(size); + for (Index j = size - 1; j >= 0; j--) + { + m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1); + m_eivec.col(j) = m_tmp; + } + } + +public: + UpperHessenbergEigen() : + m_n(0), m_computed(false) + {} + + UpperHessenbergEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + using std::sqrt; + + if (mat.rows() != mat.cols()) + throw std::invalid_argument("UpperHessenbergEigen: matrix must be square"); + + m_n = mat.rows(); + // Scale matrix prior to the Schur decomposition + const Scalar scale = mat.cwiseAbs().maxCoeff(); + + // Reduce to real Schur form + Matrix Q = Matrix::Identity(m_n, m_n); + m_realSchur.computeFromHessenberg(mat / scale, Q, true); + if (m_realSchur.info() != Eigen::Success) + throw std::runtime_error("UpperHessenbergEigen: eigen decomposition failed"); + + m_matT = m_realSchur.matrixT(); + m_eivec = m_realSchur.matrixU(); + + // Compute eigenvalues from matT + m_eivalues.resize(m_n); + Index i = 0; + while (i < m_n) + { + // Real eigenvalue + if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0)) + { + m_eivalues.coeffRef(i) = m_matT.coeff(i, i); + ++i; + } + else // Complex eigenvalues + { + Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1)); + Scalar z; + // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); + // without overflow + { + Scalar t0 = m_matT.coeff(i + 1, i); + Scalar t1 = m_matT.coeff(i, i + 1); + 
Scalar maxval = std::max(abs(p), std::max(abs(t0), abs(t1))); + t0 /= maxval; + t1 /= maxval; + Scalar p0 = p / maxval; + z = maxval * sqrt(abs(p0 * p0 + t0 * t1)); + } + m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z); + m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z); + i += 2; + } + } + + // Compute eigenvectors + doComputeEigenvectors(); + + // Scale eigenvalues back + m_eivalues *= scale; + + m_computed = true; + } + + const ComplexVector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + return m_eivalues; + } + + ComplexMatrix eigenvectors() + { + using std::abs; + + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + Index n = m_eivec.cols(); + ComplexMatrix matV(n, n); + for (Index j = 0; j < n; ++j) + { + // imaginary part of real eigenvalue is already set to exact zero + if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n) + { + // we have a real eigen value + matV.col(j) = m_eivec.col(j).template cast(); + matV.col(j).normalize(); + } + else + { + // we have a pair of complex eigen values + for (Index i = 0; i < n; ++i) + { + matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1)); + matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1)); + } + matV.col(j).normalize(); + matV.col(j + 1).normalize(); + ++j; + } + } + + return matV; + } +}; + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h new file mode 100644 index 000000000..5778f11dc --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h @@ -0,0 +1,670 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_QR_H +#define UPPER_HESSENBERG_QR_H + +#include +#include // std::sqrt +#include // std::fill, std::copy +#include // std::logic_error + +namespace Spectra { + +/// +/// \defgroup Internals Internal Classes +/// +/// Classes for internal use. May be useful to developers. +/// + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup LinearAlgebra Linear Algebra +/// +/// A number of classes for linear algebra operations. +/// + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of an upper Hessenberg matrix. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// +template +class UpperHessenbergQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix RowVector; + typedef Eigen::Array Array; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Matrix m_mat_T; + +protected: + Index m_n; + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + // Q = G1 * G2 * ... 
* G_{n-1} + Scalar m_shift; + Array m_rot_cos; + Array m_rot_sin; + bool m_computed; + + // Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r + // If both x and y are zero, set c = 1 and s = 0 + // We must implement it in a numerically stable way + static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s) + { + using std::sqrt; + + const Scalar xsign = (x > Scalar(0)) - (x < Scalar(0)); + const Scalar ysign = (y > Scalar(0)) - (y < Scalar(0)); + const Scalar xabs = x * xsign; + const Scalar yabs = y * ysign; + if (xabs > yabs) + { + // In this case xabs != 0 + const Scalar ratio = yabs / xabs; // so that 0 <= ratio < 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + c = xsign / common; + r = xabs * common; + s = -y / r; + } + else + { + if (yabs == Scalar(0)) + { + r = Scalar(0); + c = Scalar(1); + s = Scalar(0); + return; + } + const Scalar ratio = xabs / yabs; // so that 0 <= ratio <= 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + s = -ysign / common; + r = yabs * common; + c = x / r; + } + } + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + UpperHessenbergQR(Index size) : + m_n(size), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. 
+ /// + UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), + m_shift(shift), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + { + compute(mat, shift); + } + + /// + /// Virtual destructor. + /// + virtual ~UpperHessenbergQR(){}; + + /// + /// Conduct the QR factorization of an upper Hessenberg matrix with + /// an optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. + /// + virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("UpperHessenbergQR: matrix must be square"); + + m_shift = shift; + m_mat_T.resize(m_n, m_n); + m_rot_cos.resize(m_n - 1); + m_rot_sin.resize(m_n - 1); + + // Make a copy of mat - s * I + std::copy(mat.data(), mat.data() + mat.size(), m_mat_T.data()); + m_mat_T.diagonal().array() -= m_shift; + + Scalar xi, xj, r, c, s; + Scalar *Tii, *ptr; + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + Tii = &m_mat_T.coeffRef(i, i); + + // Make sure mat_T is upper Hessenberg + // Zero the elements below mat_T(i + 1, i) + std::fill(Tii + 2, Tii + m_n - i, Scalar(0)); + + xi = Tii[0]; // mat_T(i, i) + xj = Tii[1]; // mat_T(i + 1, i) + compute_rotation(xi, xj, r, c, s); + m_rot_cos[i] = c; + m_rot_sin[i] = s; + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(n - 1)] = G' * T[i:(i + 1), i:(n - 1)] + + // Gt << c, -s, s, c; + // m_mat_T.block(i, i, 2, m_n - i) = Gt * m_mat_T.block(i, i, 2, m_n - i); + Tii[0] = r; // m_mat_T(i, i) => r + Tii[1] = 0; // m_mat_T(i + 1, i) => 0 + ptr = Tii + m_n; // m_mat_T(i, k), k = i+1, i+2, ..., n-1 + for (Index j = i + 1; j < m_n; 
j++, ptr += m_n) + { + Scalar tmp = ptr[0]; + ptr[0] = c * tmp - s * ptr[1]; + ptr[1] = s * tmp + c * ptr[1]; + } + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(n - 1)] + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) *= c; + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) += s * mat_T.block(i, i + 1, 1, m_n - i - 1); + } + + m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + virtual Matrix matrix_R() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + return m_mat_T; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
+ /// + virtual void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(m_n, m_n); + std::copy(m_mat_T.data(), m_mat_T.data() + m_mat_T.size(), dest.data()); + + // Compute the RQ matrix + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Scalar *Yi, *Yi1; + Yi = &dest.coeffRef(0, i); + Yi1 = Yi + m_n; // RQ(0, i + 1) + const Index i2 = i + 2; + for (Index j = 0; j < i2; j++) + { + const Scalar tmp = Yi[j]; + Yi[j] = c * tmp - s * Yi1[j]; + Yi1[j] = s * tmp + c * Yi1[j]; + } + + /* Vector dest = RQ.block(0, i, i + 2, 1); + dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1); + dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */ + } + + // Add the shift to the diagonal + dest.diagonal().array() += m_shift; + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$. + /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp + s * Y[i + 1]; + Y[i + 1] = -s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$. 
+ /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y + void apply_QtY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi' * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp - s * Y[i + 1]; + Y[i + 1] = s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi + s * Yi1; + Y.row(i + 1) = -s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> Q'Y = G_{n-1}' * ... 
* G2' * G1' * Y + void apply_QtY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi - s * Yi1; + Y.row(i + 1) = s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ = Y * G1 * G2 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + /*Vector Yi(Y.rows()); + for(Index i = 0; i < m_n - 1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi - s * Y.col(i + 1); + Y.col(i + 1) = s * Yi + c * Y.col(i + 1); + }*/ + Scalar *Y_col_i, *Y_col_i1; + const Index n1 = m_n - 1; + const Index nrow = Y.rows(); + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + + Y_col_i = &Y.coeffRef(0, i); + Y_col_i1 = &Y.coeffRef(0, i + 1); + for (Index j = 0; j < nrow; j++) + { + Scalar tmp = Y_col_i[j]; + Y_col_i[j] = c * tmp - s * Y_col_i1[j]; + Y_col_i1[j] = s * tmp + c * Y_col_i1[j]; + } + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$. 
+ /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1' + void apply_YQt(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + Vector Yi(Y.rows()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi' + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi + s * Y.col(i + 1); + Y.col(i + 1) = -s * Yi + c * Y.col(i + 1); + } + } +}; + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of a tridiagonal matrix, a special +/// case of upper Hessenberg matrices. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// +template +class TridiagQR : public UpperHessenbergQR +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef typename Matrix::Index Index; + + Vector m_T_diag; // diagonal elements of T + Vector m_T_lsub; // lower subdiagonal of T + Vector m_T_usub; // upper subdiagonal of T + Vector m_T_usub2; // 2nd upper subdiagonal of T + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + TridiagQR(Index size) : + UpperHessenbergQR(size) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. 
`Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + UpperHessenbergQR(mat.rows()) + { + this->compute(mat, shift); + } + + /// + /// Conduct the QR factorization of a tridiagonal matrix with an + /// optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + this->m_n = mat.rows(); + if (this->m_n != mat.cols()) + throw std::invalid_argument("TridiagQR: matrix must be square"); + + this->m_shift = shift; + m_T_diag.resize(this->m_n); + m_T_lsub.resize(this->m_n - 1); + m_T_usub.resize(this->m_n - 1); + m_T_usub2.resize(this->m_n - 2); + this->m_rot_cos.resize(this->m_n - 1); + this->m_rot_sin.resize(this->m_n - 1); + + m_T_diag.array() = mat.diagonal().array() - this->m_shift; + m_T_lsub.noalias() = mat.diagonal(-1); + m_T_usub.noalias() = m_T_lsub; + + // A number of pointers to avoid repeated address calculation + Scalar *c = this->m_rot_cos.data(), // pointer to the cosine vector + *s = this->m_rot_sin.data(), // pointer to the sine vector + r; + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + // diag[i] == T[i, i] + // lsub[i] == T[i + 1, i] + // r = sqrt(T[i, i]^2 + T[i + 1, i]^2) + // c = T[i, i] / r, s = -T[i + 1, i] / r + this->compute_rotation(m_T_diag.coeff(i), m_T_lsub.coeff(i), r, *c, *s); + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(i + 2)] = G' * T[i:(i + 1), i:(i + 2)] + + // Update T[i, i] and T[i + 1, i] + // The updated value of T[i, i] is known to be r + // The updated value of T[i + 1, i] is known to be 0 + m_T_diag.coeffRef(i) = 
r; + m_T_lsub.coeffRef(i) = Scalar(0); + // Update T[i, i + 1] and T[i + 1, i + 1] + // usub[i] == T[i, i + 1] + // diag[i + 1] == T[i + 1, i + 1] + const Scalar tmp = m_T_usub.coeff(i); + m_T_usub.coeffRef(i) = (*c) * tmp - (*s) * m_T_diag.coeff(i + 1); + m_T_diag.coeffRef(i + 1) = (*s) * tmp + (*c) * m_T_diag.coeff(i + 1); + // Update T[i, i + 2] and T[i + 1, i + 2] + // usub2[i] == T[i, i + 2] + // usub[i + 1] == T[i + 1, i + 2] + if (i < n1 - 1) + { + m_T_usub2.coeffRef(i) = -(*s) * m_T_usub.coeff(i + 1); + m_T_usub.coeffRef(i + 1) *= (*c); + } + + c++; + s++; + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(i + 2)] + // T[i + 1, i + 1] = c * T[i + 1, i + 1] + s * T[i, i + 1]; + // T[i + 1, i + 2] *= c; + } + + this->m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + Matrix matrix_R() const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + Matrix R = Matrix::Zero(this->m_n, this->m_n); + R.diagonal().noalias() = m_T_diag; + R.diagonal(1).noalias() = m_T_usub; + R.diagonal(2).noalias() = m_T_usub2; + + return R; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is a tridiagonal matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
+ /// + void matrix_QtHQ(Matrix& dest) const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(this->m_n, this->m_n); + dest.setZero(); + dest.diagonal().noalias() = m_T_diag; + // The upper diagonal refers to m_T_usub + // The 2nd upper subdiagonal will be zero in RQ + + // Compute the RQ matrix + // [m11 m12] points to RQ[i:(i+1), i:(i+1)] + // [0 m22] + // + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = this->m_rot_cos.coeff(i); + const Scalar s = this->m_rot_sin.coeff(i); + const Scalar m11 = dest.coeff(i, i), + m12 = m_T_usub.coeff(i), + m22 = m_T_diag.coeff(i + 1); + + // Update the diagonal and the lower subdiagonal of dest + dest.coeffRef(i, i) = c * m11 - s * m12; + dest.coeffRef(i + 1, i) = -s * m22; + dest.coeffRef(i + 1, i + 1) = c * m22; + } + + // Copy the lower subdiagonal to upper subdiagonal + dest.diagonal(1).noalias() = dest.diagonal(-1); + + // Add the shift to the diagonal + dest.diagonal().array() += this->m_shift; + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_QR_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h new file mode 100644 index 000000000..c41cae729 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h @@ -0,0 +1,108 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef DENSE_CHOLESKY_H +#define DENSE_CHOLESKY_H + +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. +/// +template +class DenseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + const Index m_n; + Eigen::LLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseCholesky(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_info(NOT_COMPUTED) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? + SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixL().solve(x); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h new file mode 100644 index 000000000..02ad32f8f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h @@ -0,0 +1,102 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_COMPLEX_SHIFT_SOLVE_H +#define DENSE_GEN_COMPLEX_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the complex shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued +/// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the +/// GenEigsComplexShiftSolver eigen solver. 
+/// +template +class DenseGenComplexShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::PartialPivLU ComplexSolver; + + ConstGenericMatrix m_mat; + const Index m_n; + ComplexSolver m_solver; + ComplexVector m_x_cache; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenComplexShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the complex shift \f$\sigma\f$. + /// + /// \param sigmar Real part of \f$\sigma\f$. + /// \param sigmai Imaginary part of \f$\sigma\f$. + /// + void set_shift(Scalar sigmar, Scalar sigmai) + { + m_solver.compute(m_mat.template cast() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n)); + m_x_cache.resize(m_n); + m_x_cache.setZero(); + } + + /// + /// Perform the complex shift-solve operation + /// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = Re( inv(A - sigma * I) * x_in ) + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_x_cache.real() = MapConstVec(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(m_x_cache).real(); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_COMPLEX_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h new file mode 100644 index 000000000..c4ade80a3 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h @@ -0,0 +1,80 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_MAT_PROD_H +#define DENSE_GEN_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \defgroup MatOp Matrix Operations +/// +/// Define matrix operations on existing matrix objects +/// + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and +/// SymEigsSolver eigen solvers. +/// +template +class DenseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. 
+ /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h new file mode 100644 index 000000000..9c93ecc7a --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h @@ -0,0 +1,88 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_REAL_SHIFT_SOLVE_H +#define DENSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class DenseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const Index m_n; + Eigen::PartialPivLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. 
+ /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenRealShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n)); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h new file mode 100644 index 000000000..76c792686 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef DENSE_SYM_MAT_PROD_H +#define DENSE_SYM_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. +/// +template +class DenseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseSymMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h new file mode 100644 index 000000000..620aa9413 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h @@ -0,0 +1,92 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_SYM_SHIFT_SOLVE_H +#define DENSE_SYM_SHIFT_SOLVE_H + +#include +#include + +#include "../LinAlg/BKLDLT.h" +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. +/// +template +class DenseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_n; + BKLDLT m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). 
+ /// + DenseSymShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat, Uplo, sigma); + if (m_solver.info() != SUCCESSFUL) + throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h new file mode 100644 index 000000000..a73efc389 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h @@ -0,0 +1,109 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_CHOLESKY_H +#define SPARSE_CHOLESKY_H + +#include +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. 
It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. +/// +template +class SparseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + const Index m_n; + Eigen::SimplicialLLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseCholesky(ConstGenericSparseMatrix& mat) : + m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? + SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.permutationP() * x; + m_decomp.matrixL().solveInPlace(y); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. 
+ /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + y = m_decomp.permutationPinv() * y; + } +}; + +} // namespace Spectra + +#endif // SPARSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h new file mode 100644 index 000000000..0cc1f6674 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h @@ -0,0 +1,74 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_MAT_PROD_H +#define SPARSE_GEN_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver +/// eigen solvers. +/// +template +class SparseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. 
+ /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h new file mode 100644 index 000000000..fc6e3c508 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h @@ -0,0 +1,93 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_REAL_SHIFT_SOLVE_H +#define SPARSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class SparseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. 
+ /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenRealShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + SparseMatrix I(m_n, m_n); + I.setIdentity(); + + m_solver.compute(m_mat - sigma * I); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h new file mode 100644 index 000000000..3abd0c177 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h @@ -0,0 +1,100 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef SPARSE_REGULAR_INVERSE_H +#define SPARSE_REGULAR_INVERSE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines matrix operations required by the generalized eigen solver +/// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$, +/// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation +/// solving operation \f$y=B^{-1}x\f$. +/// +/// This class is intended to be used with the SymGEigsSolver generalized eigen solver +/// in the regular inverse mode. +/// +template +class SparseRegularInverse +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::ConjugateGradient m_cg; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseRegularInverse(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseRegularInverse: matrix must be square"); + + m_cg.compute(mat); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Perform the solving operation \f$y=B^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = inv(B) * x_in + void solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_cg.solve(x); + } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Bx\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = B * x_in + void mat_prod(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_REGULAR_INVERSE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h new file mode 100644 index 000000000..2dd8799eb --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_MAT_PROD_H +#define SPARSE_SYM_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. +/// +template +class SparseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. 
+ /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h new file mode 100644 index 000000000..674c6ac52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h @@ -0,0 +1,95 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_SHIFT_SOLVE_H +#define SPARSE_SYM_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. 
+/// +template +class SparseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + SparseMatrix mat = m_mat.template selfadjointView(); + SparseMatrix identity(m_n, m_n); + identity.setIdentity(); + mat = mat - sigma * identity; + m_solver.isSymmetric(true); + m_solver.compute(mat); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h new file mode 100644 index 000000000..68654aafd --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h @@ -0,0 +1,150 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ARNOLDI_OP_H +#define ARNOLDI_OP_H + +#include +#include // std::sqrt + +namespace Spectra { + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup Operators Operators +/// +/// Different types of operators. +/// + +/// +/// \ingroup Operators +/// +/// Operators used in the Arnoldi factorization. +/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; + +public: + ArnoldiOp(OpType* op, BOpType* Bop) : + m_op(*op), m_Bop(*Bop), m_cache(op->rows()) + {} + + inline Index rows() const { return m_op.rows(); } + + // In generalized eigenvalue problem Ax=lambda*Bx, define the inner product to be = x'By. 
+ // For regular eigenvalue problems, it is the usual inner product = x'y + + // Compute = x'By + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + return x.dot(m_cache); + } + + // Compute res = = X'By + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + res.noalias() = x.transpose() * m_cache; + } + + // B-norm of a vector, ||x||_B = sqrt(x'Bx) + template + Scalar norm(const Arg& x) + { + using std::sqrt; + return sqrt(inner_product(x, x)); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// \ingroup Operators +/// +/// Placeholder for the B-operator when \f$B = I\f$. +/// +class IdentityBOp +{}; + +/// +/// \ingroup Operators +/// +/// Partial specialization for the case \f$B = I\f$. +/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + +public: + ArnoldiOp(OpType* op, IdentityBOp* /*Bop*/) : + m_op(*op) + {} + + inline Index rows() const { return m_op.rows(); } + + // Compute = x'y + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) const + { + return x.dot(y); + } + + // Compute res = = X'y + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) const + { + res.noalias() = x.transpose() * y; + } + + // B-norm of a vector. 
For regular eigenvalue problems it is simply the L2 norm + template + Scalar norm(const Arg& x) + { + return x.norm(); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // ARNOLDI_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h new file mode 100644 index 000000000..fa9958352 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h @@ -0,0 +1,75 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_CHOLESKY_OP_H +#define SYM_GEIGS_CHOLESKY_OP_H + +#include +#include "../DenseSymMatProd.h" +#include "../DenseCholesky.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any +/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$. +/// This class is intended for internal use. +/// +template , + typename BOpType = DenseCholesky > +class SymGEigsCholeskyOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. 
+ /// + SymGEigsCholeskyOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * A * inv(L') * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_Bop.upper_triangular_solve(x_in, y_out); + m_op.perform_op(y_out, m_cache.data()); + m_Bop.lower_triangular_solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_CHOLESKY_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h new file mode 100644 index 000000000..514269c7f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h @@ -0,0 +1,72 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_REG_INV_OP_H +#define SYM_GEIGS_REG_INV_OP_H + +#include +#include "../SparseSymMatProd.h" +#include "../SparseRegularInverse.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// regular inverse mode. This class is intended for internal use. 
+/// +template , + typename BOpType = SparseRegularInverse > +class SymGEigsRegInvOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. + /// + SymGEigsRegInvOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=B^{-1}Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(B) * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, m_cache.data()); + m_Bop.solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_REG_INV_OP_H diff --git a/gtsam/3rdparty/Spectra/SymEigsBase.h b/gtsam/3rdparty/Spectra/SymEigsBase.h new file mode 100644 index 000000000..9601425d5 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsBase.h @@ -0,0 +1,448 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef SYM_EIGS_BASE_H +#define SYM_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/TridiagEigen.h" +#include "LinAlg/Lanczos.h" + +namespace Spectra { + +/// +/// \defgroup EigenSolver Eigen Solvers +/// +/// Eigen solvers for different types of problems. +/// + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for symmetric eigen solvers, mainly for internal use. +/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as SymEigsSolver and SymEigsShiftSolver. +/// +template +class SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef ArnoldiOp ArnoldiOpType; + typedef Lanczos LanczosFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. 
matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Lanczos method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + LanczosFac m_fac; // Lanczos factorization + Vector m_ritz_val; // Ritz values + +private: + Matrix m_ritz_vec; // Ritz vectors + Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Implicitly restarted Lanczos factorization + void restart(Index k) + { + if (k >= m_ncv) + return; + + TridiagQR decomp(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + // QR decomposition of H-mu*I, mu is the shift + decomp.compute(m_fac.matrix_H(), m_ritz_val[i]); + + // Q -> Q * Qi + decomp.apply_YQ(Q); + // H -> Q'HQ + // Since QR = H - mu * I, we have H = QR + mu * I + // and therefore Q'HQ = RQ + mu * I + m_fac.compress_H(decomp); + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // Returns the adjusted nev for restarting + Index 
nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 2) + nev_new = 2; + + if (nev_new > m_ncv - 1) + nev_new = m_ncv - 1; + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + TridiagEigen decomp(m_fac.matrix_H()); + const Vector& evals = decomp.eigenvalues(); + const Matrix& evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // For BOTH_ENDS, the eigenvalues are sorted according + // to the LARGEST_ALGE rule, so we need to move those smallest + // values to the left + // The order would be + // Largest => Smallest => 2nd largest => 2nd smallest => ... 
+ // We keep this order since the first k values will always be + // the wanted collection, no matter k is nev_updated (used in restart()) + // or is nev (used in sort_ritzpair()) + if (SelectionRule == BOTH_ENDS) + { + std::vector ind_copy(ind); + for (Index i = 0; i < m_ncv; i++) + { + // If i is even, pick values from the left (large values) + // If i is odd, pick values from the right (small values) + if (i % 2 == 0) + ind[i] = ind_copy[i / 2]; + else + ind[i] = ind_copy[m_ncv - 1 - i / 2]; + } + } + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_ALGE: + break; + case LARGEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_ALGE: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + Vector new_ritz_val(m_ncv); + Matrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + + m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + 
+public: + /// \cond + + SymEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 1) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix"); + + if (ncv <= nev || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~SymEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. + /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Lanczos factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. + /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. 
+ /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_ALGE`, `Spectra::LARGEST_MAGN`, + /// `Spectra::SMALLEST_ALGE` and `Spectra::SMALLEST_MAGN`, + /// for example `LARGEST_ALGE` indicates that largest eigenvalues + /// come first. Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of SymEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_ALGE) + { + // The m-step Lanczos factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. + /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. 
+ /// + Vector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + Vector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. + /// + virtual Matrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + Matrix res(m_n, nvec); + + if (!nvec) + return res; + + Matrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. + /// + virtual Matrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h new file mode 100644 index 000000000..e2b410cb1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h @@ -0,0 +1,203 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef SYM_EIGS_SHIFT_SOLVER_H +#define SYM_EIGS_SHIFT_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices using +/// the **shift-and-invert mode**. The background information of the symmetric +/// eigen solver is documented in the SymEigsSolver class. Here we focus on +/// explaining the shift-and-invert mode. +/// +/// The shift-and-invert mode is based on the following fact: +/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of +/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$, +/// we have +/// \f[(A-\sigma I)^{-1}x=\nu x\f] +/// where +/// \f[\nu=\frac{1}{\lambda-\sigma}\f] +/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix +/// \f$(A-\sigma I)^{-1}\f$. +/// +/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$ +/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired +/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting +/// that \f$\lambda=\sigma+\nu^{-1}\f$. +/// +/// The reason why we need this type of manipulation is that +/// the algorithm of **Spectra** (and also **ARPACK**) +/// is good at finding eigenvalues with large magnitude, but may fail in looking +/// for eigenvalues that are close to zero. However, if we really need them, we +/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then +/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$ +/// implies smallest values of \f$\lambda\f$. +/// +/// To summarize, in the shift-and-invert mode, the selection rule will apply to +/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. 
So a selection rule +/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of +/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues() +/// method will always return the eigenvalues in the original problem (i.e., +/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the +/// same for both the original problem and the shifted-and-inverted problem. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymShiftSolve and +/// SparseSymShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseSymShiftSolve. 
+/// +/// Below is an example that illustrates the use of the shift-and-invert mode: +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // A size-10 diagonal matrix with elements 1, 2, ..., 10 +/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10); +/// for(int i = 0; i < M.rows(); i++) +/// M(i, i) = i + 1; +/// +/// // Construct matrix operation object using the wrapper class +/// DenseSymShiftSolve op(M); +/// +/// // Construct eigen solver object with shift 0 +/// // This will find eigenvalues that are closest to 0 +/// SymEigsShiftSolver< double, LARGEST_MAGN, +/// DenseSymShiftSolve > eigs(&op, 3, 6, 0.0); +/// +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (3.0, 2.0, 1.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +/// Also an example for user-supplied matrix shift-solve operation class: +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTenShiftSolve +/// { +/// private: +/// double sigma_; +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// void set_shift(double sigma) { sigma_ = sigma; } +/// // y_out = inv(A - sigma * I) * x_in +/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...) 
+/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] / (i + 1 - sigma_); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTenShiftSolve op; +/// // Find three eigenvalues that are closest to 3.14 +/// SymEigsShiftSolver eigs(&op, 3, 6, 3.14); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (4.0, 3.0, 2.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsShiftSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Array Array; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + Array m_ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = m_ritz_val_org; + SymEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseSymShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseSymShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv_` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. 
This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// \param sigma The value of the shift. + /// + SymEigsShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + SymEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymEigsSolver.h b/gtsam/3rdparty/Spectra/SymEigsSolver.h new file mode 100644 index 000000000..404df1e52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsSolver.h @@ -0,0 +1,171 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_SOLVER_H +#define SYM_EIGS_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric. +/// +/// **Spectra** is designed to calculate a specified number (\f$k\f$) +/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much +/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues +/// and eigenvectors are computed. +/// +/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires +/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of +/// this solver need to supply a class that computes the result of \f$Av\f$ +/// for any given vector \f$v\f$. The name of this class should be given to +/// the template parameter `OpType`, and instance of this class passed to +/// the constructor of SymEigsSolver. 
+/// +/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**, +/// for example `Eigen::MatrixXd`, then there is an easy way to construct such +/// matrix operation class, by using the built-in wrapper class DenseSymMatProd +/// which wraps an existing matrix object in **Eigen**. This is also the +/// default template parameter for SymEigsSolver. For sparse matrices, the +/// wrapper class SparseSymMatProd can be used similarly. +/// +/// If the users need to define their own matrix-vector multiplication operation +/// class, it should implement all the public member functions as in DenseSymMatProd. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// +/// Below is an example that demonstrates the usage of this class. 
+/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to calculate the eigenvalues of M +/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10); +/// Eigen::MatrixXd M = A + A.transpose(); +/// +/// // Construct matrix operation object using the wrapper class DenseSymMatProd +/// DenseSymMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest three eigenvalues +/// SymEigsSolver< double, LARGEST_ALGE, DenseSymMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +/// +/// And here is an example for user-supplied matrix operation class. +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTen +/// { +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// // y_out = M * x_in +/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] * (i + 1); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTen op; +/// SymEigsSolver eigs(&op, 3, 6); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (10, 9, 8) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. 
+ /// + /// \param op Pointer to the matrix operation object, which should implement + /// the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymEigsSolver(OpType* op, Index nev, Index ncv) : + SymEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymGEigsSolver.h b/gtsam/3rdparty/Spectra/SymGEigsSolver.h new file mode 100644 index 000000000..68aa37cfc --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymGEigsSolver.h @@ -0,0 +1,326 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_SOLVER_H +#define SYM_GEIGS_SOLVER_H + +#include "SymEigsBase.h" +#include "Util/GEigsMode.h" +#include "MatOp/internal/SymGEigsCholeskyOp.h" +#include "MatOp/internal/SymGEigsRegInvOp.h" + +namespace Spectra { + +/// +/// \defgroup GEigenSolver Generalized Eigen Solvers +/// +/// Generalized eigen solvers for different types of problems. 
+/// + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and +/// \f$B\f$ is positive definite. +/// +/// There are two modes of this solver, specified by the template parameter +/// GEigsMode. See the pages for the specialized classes for details. +/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky +/// decomposition, which is the preferred mode when the decomposition is +/// available. (This can be easily done in Eigen using the dense or sparse +/// Cholesky solver.) +/// See \ref SymGEigsSolver "SymGEigsSolver (Cholesky mode)" for more details. +/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the +/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be +/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or +/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition. +/// See \ref SymGEigsSolver "SymGEigsSolver (Regular inverse mode)" for more details. + +// Empty class template +template +class SymGEigsSolver +{}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky +/// decomposition \f$B=LL'\f$. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower +/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$. 
+/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the DenseCholesky or SparseCholesky +/// classes. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either +/// use the wrapper classes such as DenseCholesky and +/// SparseCholesky, or define their +/// own that implements all the public member functions as in +/// DenseCholesky. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_CHOLESKY. +/// +/// Below is an example that demonstrates the usage of this class. 
+/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x +/// const int n = 100; +/// +/// // Define the A matrix +/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n); +/// Eigen::MatrixXd A = M + M.transpose(); +/// +/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals +/// Eigen::SparseMatrix B(n, n); +/// B.reserve(Eigen::VectorXi::Constant(n, 3)); +/// for(int i = 0; i < n; i++) +/// { +/// B.insert(i, i) = 2.0; +/// if(i > 0) +/// B.insert(i - 1, i) = 1.0; +/// if(i < n - 1) +/// B.insert(i + 1, i) = 1.0; +/// } +/// +/// // Construct matrix operation object using the wrapper classes +/// DenseSymMatProd op(A); +/// SparseCholesky Bop(B); +/// +/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues +/// SymGEigsSolver, SparseCholesky, GEIGS_CHOLESKY> +/// geigs(&op, &Bop, 3, 6); +/// +/// // Initialize and compute +/// geigs.init(); +/// int nconv = geigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// Eigen::MatrixXd evecs; +/// if(geigs.info() == SUCCESSFUL) +/// { +/// evalues = geigs.eigenvalues(); +/// evecs = geigs.eigenvectors(); +/// } +/// +/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl; +/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl; +/// +/// // Verify results using the generalized eigen solver in Eigen +/// Eigen::MatrixXd Bdense = B; +/// Eigen::GeneralizedSelfAdjointEigenSolver es(A, Bdense); +/// +/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl; +/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl; +/// +/// return 0; +/// } +/// \endcode + +// Partial 
specialization for GEigsMode = GEIGS_CHOLESKY +template +class SymGEigsSolver : + public SymEigsBase, IdentityBOp> +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + BOpType* m_Bop; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. It + /// represents a Cholesky decomposition of \f$B\f$, and should + /// implement the lower and upper triangular solving operations: + /// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector + /// \f$v\f$, where \f$LL'=B\f$. Users could either + /// create the object from the wrapper classes such as DenseCholesky, or + /// define their own that implements all the public member functions + /// as in DenseCholesky. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. 
+ /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, IdentityBOp>( + new SymGEigsCholeskyOp(*op, *Bop), NULL, nev, ncv), + m_Bop(Bop) + {} + + /// \cond + + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsCholeskyOp object + delete this->m_op; + } + + Matrix eigenvectors(Index nvec) const + { + Matrix res = SymEigsBase, IdentityBOp>::eigenvectors(nvec); + Vector tmp(res.rows()); + const Index nconv = res.cols(); + for (Index i = 0; i < nconv; i++) + { + m_Bop->upper_triangular_solve(&res(0, i), tmp.data()); + res.col(i).noalias() = tmp; + } + + return res; + } + + Matrix eigenvectors() const + { + return SymGEigsSolver::eigenvectors(this->m_nev); + } + + /// \endcond +}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations +/// defined below. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the +/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$. +/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the SparseRegularInverse class. There is no +/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode +/// is always preferred. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. 
+/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either +/// use the wrapper class SparseRegularInverse, or define their +/// own that implements all the public member functions as in +/// SparseRegularInverse. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_REGULAR_INVERSE. +/// + +// Partial specialization for GEigsMode = GEIGS_REGULAR_INVERSE +template +class SymGEigsSolver : + public SymEigsBase, BOpType> +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. It should + /// implement the multiplication operation \f$Bv\f$ and the linear equation + /// solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class SparseRegularInverse, or + /// define their own that implements all the public member functions + /// as in SparseRegularInverse. 
+ /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, BOpType>( + new SymGEigsRegInvOp(*op, *Bop), Bop, nev, ncv) + {} + + /// \cond + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsRegInvOp object + delete this->m_op; + } + /// \endcond +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/Util/CompInfo.h b/gtsam/3rdparty/Spectra/Util/CompInfo.h new file mode 100644 index 000000000..07b8399a1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/CompInfo.h @@ -0,0 +1,35 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef COMP_INFO_H +#define COMP_INFO_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to report the status of computation. +/// +enum COMPUTATION_INFO +{ + SUCCESSFUL = 0, ///< Computation was successful. + + NOT_COMPUTED, ///< Used in eigen solvers, indicating that computation + ///< has not been conducted. Users should call + ///< the `compute()` member function of solvers. + + NOT_CONVERGING, ///< Used in eigen solvers, indicating that some eigenvalues + ///< did not converge. The `compute()` + ///< function returns the number of converged eigenvalues. 
+ + NUMERICAL_ISSUE ///< Used in Cholesky decomposition, indicating that the + ///< matrix is not positive definite. +}; + +} // namespace Spectra + +#endif // COMP_INFO_H diff --git a/gtsam/3rdparty/Spectra/Util/GEigsMode.h b/gtsam/3rdparty/Spectra/Util/GEigsMode.h new file mode 100644 index 000000000..a547ac0bf --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/GEigsMode.h @@ -0,0 +1,32 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEIGS_MODE_H +#define GEIGS_MODE_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to specify the mode of generalized eigenvalue solver. +/// +enum GEIGS_MODE +{ + GEIGS_CHOLESKY = 0, ///< Using Cholesky decomposition to solve generalized eigenvalues. + + GEIGS_REGULAR_INVERSE, ///< Regular inverse mode for generalized eigenvalue solver. + + GEIGS_SHIFT_INVERT, ///< Shift-and-invert mode for generalized eigenvalue solver. + + GEIGS_BUCKLING, ///< Buckling mode for generalized eigenvalue solver. + + GEIGS_CAYLEY ///< Cayley transformation mode for generalized eigenvalue solver. +}; + +} // namespace Spectra + +#endif // GEIGS_MODE_H diff --git a/gtsam/3rdparty/Spectra/Util/SelectionRule.h b/gtsam/3rdparty/Spectra/Util/SelectionRule.h new file mode 100644 index 000000000..237950b4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SelectionRule.h @@ -0,0 +1,275 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef SELECTION_RULE_H +#define SELECTION_RULE_H + +#include // std::vector +#include // std::abs +#include // std::sort +#include // std::complex +#include // std::pair +#include // std::invalid_argument + +namespace Spectra { + +/// +/// \defgroup Enumerations +/// +/// Enumeration types for the selection rule of eigenvalues. +/// + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. +/// +enum SELECT_EIGENVALUE +{ + LARGEST_MAGN = 0, ///< Select eigenvalues with largest magnitude. Magnitude + ///< means the absolute value for real numbers and norm for + ///< complex numbers. Applies to both symmetric and general + ///< eigen solvers. + + LARGEST_REAL, ///< Select eigenvalues with largest real part. Only for general eigen solvers. + + LARGEST_IMAG, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers. + + LARGEST_ALGE, ///< Select eigenvalues with largest algebraic value, considering + ///< any negative sign. Only for symmetric eigen solvers. + + SMALLEST_MAGN, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general + ///< eigen solvers. + + SMALLEST_REAL, ///< Select eigenvalues with smallest real part. Only for general eigen solvers. + + SMALLEST_IMAG, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers. + + SMALLEST_ALGE, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers. + + BOTH_ENDS ///< Select eigenvalues half from each end of the spectrum. When + ///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers. +}; + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. Alias for `SELECT_EIGENVALUE`. 
+/// +enum SELECT_EIGENVALUE_ALIAS +{ + WHICH_LM = 0, ///< Alias for `LARGEST_MAGN` + WHICH_LR, ///< Alias for `LARGEST_REAL` + WHICH_LI, ///< Alias for `LARGEST_IMAG` + WHICH_LA, ///< Alias for `LARGEST_ALGE` + WHICH_SM, ///< Alias for `SMALLEST_MAGN` + WHICH_SR, ///< Alias for `SMALLEST_REAL` + WHICH_SI, ///< Alias for `SMALLEST_IMAG` + WHICH_SA, ///< Alias for `SMALLEST_ALGE` + WHICH_BE ///< Alias for `BOTH_ENDS` +}; + +/// \cond + +// Get the element type of a "scalar" +// ElemType => double +// ElemType< std::complex > => double +template +class ElemType +{ +public: + typedef T type; +}; + +template +class ElemType > +{ +public: + typedef T type; +}; + +// When comparing eigenvalues, we first calculate the "target" +// to sort. For example, if we want to choose the eigenvalues with +// largest magnitude, the target will be -abs(x). +// The minus sign is due to the fact that std::sort() sorts in ascending order. + +// Default target: throw an exception +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + throw std::invalid_argument("incompatible selection rule"); + return -abs(val); + } +}; + +// Specialization for LARGEST_MAGN +// This covers [float, double, complex] x [LARGEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return -abs(val); + } +}; + +// Specialization for LARGEST_REAL +// This covers [complex] x [LARGEST_REAL] +template +class SortingTarget, LARGEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return -val.real(); + } +}; + +// Specialization for LARGEST_IMAG +// This covers [complex] x [LARGEST_IMAG] +template +class SortingTarget, LARGEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return -abs(val.imag()); + } +}; + +// Specialization for LARGEST_ALGE +// This covers [float, double] x [LARGEST_ALGE] +template +class 
SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Here BOTH_ENDS is the same as LARGEST_ALGE, but +// we need some additional steps, which are done in +// SymEigsSolver.h => retrieve_ritzpair(). +// There we move the smallest values to the proper locations. +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Specialization for SMALLEST_MAGN +// This covers [float, double, complex] x [SMALLEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return abs(val); + } +}; + +// Specialization for SMALLEST_REAL +// This covers [complex] x [SMALLEST_REAL] +template +class SortingTarget, SMALLEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return val.real(); + } +}; + +// Specialization for SMALLEST_IMAG +// This covers [complex] x [SMALLEST_IMAG] +template +class SortingTarget, SMALLEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return abs(val.imag()); + } +}; + +// Specialization for SMALLEST_ALGE +// This covers [float, double] x [SMALLEST_ALGE] +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return val; + } +}; + +// Sort eigenvalues and return the order index +template +class PairComparator +{ +public: + bool operator()(const PairType& v1, const PairType& v2) + { + return v1.first < v2.first; + } +}; + +template +class SortEigenvalue +{ +private: + typedef typename ElemType::type TargetType; // Type of the sorting target, will be + // a floating number type, e.g. 
"double" + typedef std::pair PairType; // Type of the sorting pair, including + // the sorting target and the index + + std::vector pair_sort; + +public: + SortEigenvalue(const T* start, int size) : + pair_sort(size) + { + for (int i = 0; i < size; i++) + { + pair_sort[i].first = SortingTarget::get(start[i]); + pair_sort[i].second = i; + } + PairComparator comp; + std::sort(pair_sort.begin(), pair_sort.end(), comp); + } + + std::vector index() + { + std::vector ind(pair_sort.size()); + for (unsigned int i = 0; i < ind.size(); i++) + ind[i] = pair_sort[i].second; + + return ind; + } +}; + +/// \endcond + +} // namespace Spectra + +#endif // SELECTION_RULE_H diff --git a/gtsam/3rdparty/Spectra/Util/SimpleRandom.h b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h new file mode 100644 index 000000000..83fa7c86f --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h @@ -0,0 +1,91 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SIMPLE_RANDOM_H +#define SIMPLE_RANDOM_H + +#include + +/// \cond + +namespace Spectra { + +// We need a simple pseudo random number generator here: +// 1. It is used to generate initial and restarted residual vector. +// 2. It is not necessary to be so "random" and advanced. All we hope +// is that the residual vector is not in the space spanned by the +// current Krylov space. This should be met almost surely. +// 3. We don't want to call RNG in C++, since we actually want the +// algorithm to be deterministic. Also, calling RNG in C/C++ is not +// allowed in R packages submitted to CRAN. +// 4. The method should be as simple as possible, so an LCG is enough. +// 5. 
Based on public domain code by Ray Gardner +// http://stjarnhimlen.se/snippets/rg_rand.c + +template +class SimpleRandom +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + const unsigned int m_a; // multiplier + const unsigned long m_max; // 2^31 - 1 + long m_rand; + + inline long next_long_rand(long seed) + { + unsigned long lo, hi; + + lo = m_a * (long) (seed & 0xFFFF); + hi = m_a * (long) ((unsigned long) seed >> 16); + lo += (hi & 0x7FFF) << 16; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + lo += hi >> 15; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + return (long) lo; + } + +public: + SimpleRandom(unsigned long init_seed) : + m_a(16807), + m_max(2147483647L), + m_rand(init_seed ? (init_seed & m_max) : 1) + {} + + Scalar random() + { + m_rand = next_long_rand(m_rand); + return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + + // Vector of random numbers of type Scalar + // Ranging from -0.5 to 0.5 + Vector random_vec(const Index len) + { + Vector res(len); + for (Index i = 0; i < len; i++) + { + m_rand = next_long_rand(m_rand); + res[i] = Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + return res; + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // SIMPLE_RANDOM_H diff --git a/gtsam/3rdparty/Spectra/Util/TypeTraits.h b/gtsam/3rdparty/Spectra/Util/TypeTraits.h new file mode 100644 index 000000000..29288c5a6 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/TypeTraits.h @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef TYPE_TRAITS_H +#define TYPE_TRAITS_H + +#include +#include + +/// \cond + +namespace Spectra { + +// For a real value type "Scalar", we want to know its smallest +// positive value, i.e., std::numeric_limits::min(). 
+// However, we must take non-standard value types into account, +// so we rely on Eigen::NumTraits. +// +// Eigen::NumTraits has defined epsilon() and lowest(), but +// lowest() means negative highest(), which is a very small +// negative value. +// +// Therefore, we manually define this limit, and use eplison()^3 +// to mimic it for non-standard types. + +// Generic definition +template +struct TypeTraits +{ + static inline Scalar min() + { + return Eigen::numext::pow(Eigen::NumTraits::epsilon(), Scalar(3)); + } +}; + +// Full specialization +template <> +struct TypeTraits +{ + static inline float min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline double min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline long double min() + { + return std::numeric_limits::min(); + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // TYPE_TRAITS_H diff --git a/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h new file mode 100644 index 000000000..69c4d92c0 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h @@ -0,0 +1,552 @@ +// Written by Anna Araslanova +// Modified by Yixuan Qiu +// License: MIT + +#ifndef LOBPCG_SOLVER +#define LOBPCG_SOLVER + +#include +#include + +#include +#include +#include +#include +#include + +#include "../SymGEigsSolver.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// + +/// *** METHOD +/// The class represent the LOBPCG algorithm, which was invented by Andrew Knyazev +/// Theoretical background of the procedure can be found in the articles below +/// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver : Locally optimal block preconditioned conjugate gradient method.SIAM journal on scientific computing, 23(2), pp.517 - 541. +/// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. 
Block locally optimal preconditioned eigenvalue xolvers(BLOPEX) in HYPRE and PETSc.SIAM Journal on Scientific Computing, 29(5), pp.2224 - 2239. +/// +/// *** CONDITIONS OF USE +/// Locally Optimal Block Preconditioned Conjugate Gradient(LOBPCG) is a method for finding the M smallest eigenvalues +/// and eigenvectors of a large symmetric positive definite generalized eigenvalue problem +/// \f$Ax=\lambda Bx,\f$ +/// where \f$A_{NxN}\f$ is a symmetric matrix, \f$B\f$ is symmetric and positive - definite. \f$A and B\f$ are also assumed large and sparse +/// \f$\textit{M}\f$ should be \f$\<< textit{N}\f$ (at least \f$\textit{5M} < \textit{N} \f$) +/// +/// *** ARGUMENTS +/// Eigen::SparseMatrix A; // N*N - Ax = lambda*Bx, lrage and sparse +/// Eigen::SparseMatrix X; // N*M - initial approximations to eigenvectors (random in general case) +/// Spectra::LOBPCGSolver solver(A, X); +/// *Eigen::SparseMatrix B; // N*N - Ax = lambda*Bx, sparse, positive definite +/// solver.setConstraints(B); +/// *Eigen::SparseMatrix Y; // N*K - constraints, already found eigenvectors +/// solver.setB(B); +/// *Eigen::SparseMatrix T; // N*N - preconditioner ~ A^-1 +/// solver.setPreconditioner(T); +/// +/// *** OUTCOMES +/// solver.solve(); // compute eigenpairs // void +/// solver.info(); // state of converjance // int +/// solver.residuals(); // get residuals to evaluate biases // Eigen::Matrix +/// solver.eigenvalues(); // get eigenvalues // Eigen::Matrix +/// solver.eigenvectors(); // get eigenvectors // Eigen::Matrix +/// +/// *** EXAMPLE +/// \code{.cpp} +/// #include +/// +/// // random A +/// Matrix a; +/// a = (Matrix::Random(10, 10).array() > 0.6).cast() * Matrix::Random(10, 10).array() * 5; +/// a = Matrix((a).triangularView()) + Matrix((a).triangularView()).transpose(); +/// for (int i = 0; i < 10; i++) +/// a(i, i) = i + 0.5; +/// std::cout << a << "\n"; +/// Eigen::SparseMatrix A(a.sparseView()); +/// // random X +/// Eigen::Matrix x; +/// x = Matrix::Random(10, 2).array(); 
+/// Eigen::SparseMatrix X(x.sparseView()); +/// // solve Ax = lambda*x +/// Spectra::LOBPCGSolver solver(A, X); +/// solver.compute(10, 1e-4); // 10 iterations, L2_tolerance = 1e-4*N +/// std::cout << "info\n" << solver.info() << std::endl; +/// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl; +/// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl; +/// std::cout << "residuals\n" << solver.residuals() << std::endl; +/// \endcode +/// + +template +class LOBPCGSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::SparseMatrix SparseMatrix; + typedef Eigen::SparseMatrix SparseComplexMatrix; + + const int m_n; // dimension of matrix A + const int m_nev; // number of eigenvalues requested + SparseMatrix A, X; + SparseMatrix m_Y, m_B, m_preconditioner; + bool flag_with_constraints, flag_with_B, flag_with_preconditioner; + +public: + SparseMatrix m_residuals; + Matrix m_evectors; + Vector m_evalues; + int m_info; + +private: + // B-orthonormalize matrix M + int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B, + SparseMatrix& true_BM, bool has_true_BM = false) + { + SparseMatrix BM; + + if (has_true_BM == false) + { + if (flag_with_B) + { + BM = B * M; + } + else + { + BM = M; + } + } + else + { + BM = true_BM; + } + + Eigen::SimplicialLDLT chol_MBM(M.transpose() * BM); + + if (chol_MBM.info() != SUCCESSFUL) + { + // LDLT decomposition fail + m_info = chol_MBM.info(); + return chol_MBM.info(); + } + + SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast(); + ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast(); + + D_MBM_vec = D_MBM_vec.cwiseSqrt(); + + for (int i = 0; i < D_MBM_vec.rows(); i++) + { + D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i); + } + + SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal()); + + SparseComplexMatrix 
U_inv(Upper_MBM.rows(), Upper_MBM.cols()); + U_inv.setIdentity(); + Upper_MBM.template triangularView().solveInPlace(U_inv); + + SparseComplexMatrix right_product = U_inv * D_MBM_mat; + M = M * right_product.real(); + if (flag_with_B) + { + true_BM = B * M; + } + else + { + true_BM = M; + } + + return SUCCESSFUL; + } + + void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y, + SparseMatrix& B) + { + SparseMatrix BY; + if (flag_with_B) + { + BY = B * Y; + } + else + { + BY = Y; + } + + SparseMatrix YBY = Y.transpose() * BY; + SparseMatrix BYX = BY.transpose() * X; + + SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView(); + X = X - Y * YBY_XYX; + } + + /* + return + 'AB + CD' + */ + Matrix stack_4_matricies(Matrix A, Matrix B, + Matrix C, Matrix D) + { + Matrix result(A.rows() + C.rows(), A.cols() + B.cols()); + result.topLeftCorner(A.rows(), A.cols()) = A; + result.topRightCorner(B.rows(), B.cols()) = B; + result.bottomLeftCorner(C.rows(), C.cols()) = C; + result.bottomRightCorner(D.rows(), D.cols()) = D; + return result; + } + + Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C, + Matrix D, Matrix E, Matrix F, + Matrix G, Matrix H, Matrix I) + { + Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols()); + result.block(0, 0, A.rows(), A.cols()) = A; + result.block(0, A.cols(), B.rows(), B.cols()) = B; + result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C; + result.block(A.rows(), 0, D.rows(), D.cols()) = D; + result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E; + result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F; + result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G; + result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H; + result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I; + + return result; + } + + void sort_epairs(Vector& evalues, Matrix& evectors, int SelectionRule) + { + 
std::function cmp; + if (SelectionRule == SMALLEST_ALGE) + cmp = std::less{}; + else + cmp = std::greater{}; + + std::map epairs(cmp); + for (int i = 0; i < m_evectors.cols(); ++i) + epairs.insert(std::make_pair(evalues(i), evectors.col(i))); + + int i = 0; + for (auto& epair : epairs) + { + evectors.col(i) = epair.second; + evalues(i) = epair.first; + i++; + } + } + + void removeColumns(SparseMatrix& matrix, std::vector& colToRemove) + { + // remove columns through matrix multiplication + SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size())); + int iCol = 0; + std::vector> tripletList; + tripletList.reserve(matrix.cols() - int(colToRemove.size())); + + for (int iRow = 0; iRow < matrix.cols(); iRow++) + { + if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end()) + { + tripletList.push_back(Eigen::Triplet(iRow, iCol, 1)); + iCol++; + } + } + + new_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + matrix = matrix * new_matrix; + } + + int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector& columnsToDelete) + { + // square roots from sum of squares by column + int BlockSize = m_nev; + Scalar sum, buffer; + + for (int iCol = 0; iCol < m_nev; iCol++) + { + sum = 0; + for (int iRow = 0; iRow < m_n; iRow++) + { + buffer = m_residuals.coeff(iRow, iCol); + sum += buffer * buffer; + } + + if (sqrt(sum) < tolerance_L2) + { + BlockSize--; + columnsToDelete.push_back(iCol); + } + } + return BlockSize; + } + +public: + LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) : + m_n(A.rows()), + m_nev(X.cols()), + m_info(NOT_COMPUTED), + flag_with_constraints(false), + flag_with_B(false), + flag_with_preconditioner(false), + A(A), + X(X) + { + if (A.rows() != X.rows() || A.rows() != A.cols()) + throw std::invalid_argument("Wrong size"); + + //if (m_n < 5* m_nev) + // throw std::invalid_argument("The problem size is small compared to the block size. 
Use standard eigensolver"); + } + + void setConstraints(const SparseMatrix& Y) + { + m_Y = Y; + flag_with_constraints = true; + } + + void setB(const SparseMatrix& B) + { + if (B.rows() != A.rows() || B.cols() != A.cols()) + throw std::invalid_argument("Wrong size"); + m_B = B; + flag_with_B = true; + } + + void setPreconditioner(const SparseMatrix& preconditioner) + { + m_preconditioner = preconditioner; + flag_with_preconditioner = true; + } + + void compute(int maxit = 10, Scalar tol_div_n = 1e-7) + { + Scalar tolerance_L2 = tol_div_n * m_n; + int BlockSize; + int max_iter = std::min(m_n, maxit); + + SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix; + Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD; + std::vector columnsToDelete; + + if (flag_with_constraints) + { + // Apply the constraints Y to X + applyConstraintsInPlace(X, m_Y, m_B); + } + + // Make initial vectors orthonormal + // implicit BX declaration + if (orthogonalizeInPlace(X, m_B, BX) != SUCCESSFUL) + { + max_iter = 0; + } + + AX = A * X; + // Solve the following NxN eigenvalue problem for all N eigenvalues and -vectors: + // first approximation via a dense problem + Eigen::EigenSolver eigs(Matrix(X.transpose() * AX)); + + if (eigs.info() != SUCCESSFUL) + { + m_info = eigs.info(); + max_iter = 0; + } + else + { + m_evalues = eigs.eigenvalues().real(); + m_evectors = eigs.eigenvectors().real(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + sparse_eVecX = m_evectors.sparseView(); + + X = X * sparse_eVecX; + AX = AX * sparse_eVecX; + BX = BX * sparse_eVecX; + } + + for (int iter_num = 0; iter_num < max_iter; iter_num++) + { + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = 
SUCCESSFUL; + break; + } + + // substitution of the original active mask + if (columnsToDelete.size() > 0) + { + removeColumns(m_residuals, columnsToDelete); + if (iter_num > 0) + { + removeColumns(directions, columnsToDelete); + removeColumns(AD, columnsToDelete); + removeColumns(BD, columnsToDelete); + } + columnsToDelete.clear(); // for next iteration + } + + if (flag_with_preconditioner) + { + // Apply the preconditioner to the residuals + m_residuals = m_preconditioner * m_residuals; + } + + if (flag_with_constraints) + { + // Apply the constraints Y to residuals + applyConstraintsInPlace(m_residuals, m_Y, m_B); + } + + if (orthogonalizeInPlace(m_residuals, m_B, BR) != SUCCESSFUL) + { + break; + } + AR = A * m_residuals; + + // Orthonormalize conjugate directions + if (iter_num > 0) + { + if (orthogonalizeInPlace(directions, m_B, BD, true) != SUCCESSFUL) + { + break; + } + AD = A * directions; + } + + // Perform the Rayleigh Ritz Procedure + XAR = Matrix(X.transpose() * AR); + RAR = Matrix(m_residuals.transpose() * AR); + XBR = Matrix(X.transpose() * BR); + + if (iter_num > 0) + { + XAD = X.transpose() * AD; + RAD = m_residuals.transpose() * AD; + DAD = directions.transpose() * AD; + XBD = X.transpose() * BD; + RBD = m_residuals.transpose() * BD; + + gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose()); + gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + else + { + gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR); + gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + + //calculate the lowest/largest m eigenpairs; Solve the generalized eigenvalue problem. 
+ DenseSymMatProd Aop(gramA); + DenseCholesky Bop(gramB); + + SymGEigsSolver, + DenseCholesky, GEIGS_CHOLESKY> + geigs(&Aop, &Bop, m_nev, std::min(10, int(gramA.rows()) - 1)); + + geigs.init(); + int nconv = geigs.compute(); + + //Mat evecs; + if (geigs.info() == SUCCESSFUL) + { + m_evalues = geigs.eigenvalues(); + m_evectors = geigs.eigenvectors(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + } + else + { + // Problem With General EgenVec + m_info = geigs.info(); + break; + } + + // Compute Ritz vectors + if (iter_num > 0) + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + sparse_eVecD = eVecD.sparseView(); + + DD = m_residuals * sparse_eVecR; // new conjugate directions + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + + DD = DD + directions * sparse_eVecD; + ADD = ADD + AD * sparse_eVecD; + BDD = BDD + BD * sparse_eVecD; + } + else + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + + DD = m_residuals * sparse_eVecR; + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + } + + X = X * sparse_eVecX + DD; + AX = AX * sparse_eVecX + ADD; + BX = BX * sparse_eVecX + BDD; + + directions = DD; + AD = ADD; + BD = BDD; + + } // iteration loop + + // calculate last residuals + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = SUCCESSFUL; + } + } // compute + + Vector eigenvalues() + { + return m_evalues; + } + + Matrix eigenvectors() + { + return m_evectors; + } + + Matrix residuals() + { + return 
Matrix(m_residuals); + } + + int info() { return m_info; } +}; + +} // namespace Spectra + +#endif // LOBPCG_SOLVER diff --git a/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h new file mode 100644 index 000000000..2fab26b97 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h @@ -0,0 +1,202 @@ +// Copyright (C) 2018 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef PARTIAL_SVD_SOLVER_H +#define PARTIAL_SVD_SOLVER_H + +#include +#include "../SymEigsSolver.h" + +namespace Spectra { + +// Abstract class for matrix operation +template +class SVDMatOp +{ +public: + virtual int rows() const = 0; + virtual int cols() const = 0; + + // y_out = A' * A * x_in or y_out = A * A' * x_in + virtual void perform_op(const Scalar* x_in, Scalar* y_out) = 0; + + virtual ~SVDMatOp() {} +}; + +// Operation of a tall matrix in SVD +// We compute the eigenvalues of A' * A +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDTallMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDTallMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.rows()) + {} + + // These are the rows and columns of A' * A + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A' * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.cols()); + m_cache.noalias() = m_mat * x; + y.noalias() = m_mat.transpose() * m_cache; + } +}; + +// Operation of a wide matrix in 
SVD +// We compute the eigenvalues of A * A' +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDWideMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDWideMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.cols()) + {} + + // These are the rows and columns of A * A' + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A * A' * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.rows()); + MapVec y(y_out, m_mat.rows()); + m_cache.noalias() = m_mat.transpose() * x; + y.noalias() = m_mat * m_cache; + } +}; + +// Partial SVD solver +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template > +class PartialSVDSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_m; + const int m_n; + SVDMatOp* m_op; + SymEigsSolver >* m_eigs; + int m_nconv; + Matrix m_evecs; + +public: + // Constructor + PartialSVDSolver(ConstGenericMatrix& mat, int ncomp, int ncv) : + m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0) + { + // Determine the matrix type, tall or wide + if (m_m > m_n) + { + m_op = new SVDTallMatOp(mat); + } + else + { + m_op = new SVDWideMatOp(mat); + } + + // Solver object + m_eigs = new SymEigsSolver >(m_op, ncomp, ncv); + } + + // Destructor + virtual ~PartialSVDSolver() + { + delete m_eigs; + delete m_op; + } + + // Computation + int compute(int maxit = 1000, Scalar tol = 1e-10) + { + m_eigs->init(); + m_nconv = m_eigs->compute(maxit, tol); + + return m_nconv; + } + + // The converged singular values + Vector singular_values() const + { + Vector 
svals = m_eigs->eigenvalues().cwiseSqrt(); + + return svals; + } + + // The converged left singular vectors + Matrix matrix_U(int nu) + { + if (m_evecs.cols() < 1) + { + m_evecs = m_eigs->eigenvectors(); + } + nu = std::min(nu, m_nconv); + if (m_m <= m_n) + { + return m_evecs.leftCols(nu); + } + + return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix(); + } + + // The converged right singular vectors + Matrix matrix_V(int nv) + { + if (m_evecs.cols() < 1) + { + m_evecs = m_eigs->eigenvectors(); + } + nv = std::min(nv, m_nconv); + if (m_m > m_n) + { + return m_evecs.leftCols(nv); + } + + return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix(); + } +}; + +} // namespace Spectra + +#endif // PARTIAL_SVD_SOLVER_H diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt index 49b8bc19b..37c4a1f88 100644 --- a/gtsam/CMakeLists.txt +++ b/gtsam/CMakeLists.txt @@ -133,12 +133,6 @@ endif() # paths so that the compiler uses GTSAM headers in our source directory instead # of any previously installed GTSAM headers. 
target_include_directories(gtsam BEFORE PUBLIC - # SuiteSparse_config - $ - $ - # CCOLAMD - $ - $ # main gtsam includes: $ $ @@ -147,6 +141,19 @@ target_include_directories(gtsam BEFORE PUBLIC # unit tests: $ ) +# 3rdparty libraries: use the "system" flag so they are included via "-isystem" +# and warnings (and warnings-considered-errors) in those headers are not +# reported as warnings/errors in our targets: +target_include_directories(gtsam SYSTEM BEFORE PUBLIC + # SuiteSparse_config + $ + $ + # Spectra + $ + # CCOLAMD + $ + $ +) if(GTSAM_SUPPORT_NESTED_DISSECTION) target_include_directories(gtsam BEFORE PUBLIC $ @@ -210,5 +217,5 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX) endif() # Wrap - wrap_and_install_library(../gtsam.h "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}") + wrap_and_install_library(gtsam.i "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}") endif () diff --git a/gtsam/base/GenericValue.h b/gtsam/base/GenericValue.h index dc205b47f..0e928765f 100644 --- a/gtsam/base/GenericValue.h +++ b/gtsam/base/GenericValue.h @@ -30,6 +30,14 @@ #include #include // operator typeid +#ifdef _WIN32 +#define GENERICVALUE_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define GENERICVALUE_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** diff --git a/gtsam/config.h.in b/gtsam/config.h.in index 9dc10c36a..9d1bd4ebd 100644 --- a/gtsam/config.h.in +++ b/gtsam/config.h.in @@ -72,9 +72,6 @@ // Make sure dependent projects that want it can see deprecated functions #cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V41 -// Publish flag about Eigen typedef -#cmakedefine GTSAM_TYPEDEF_POINTS_TO_VECTORS - // Support Metis-based nested dissection #cmakedefine GTSAM_SUPPORT_NESTED_DISSECTION diff --git a/gtsam/geometry/Point2.cpp b/gtsam/geometry/Point2.cpp index 4cead869f..d8060cfcf 100644 --- a/gtsam/geometry/Point2.cpp +++ b/gtsam/geometry/Point2.cpp @@ -50,37 
+50,6 @@ double distance2(const Point2& p, const Point2& q, OptionalJacobian<1, 2> H1, } } -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS - -/* ************************************************************************* */ -void Point2::print(const string& s) const { - cout << s << *this << endl; -} - -/* ************************************************************************* */ -bool Point2::equals(const Point2& q, double tol) const { - return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol); -} - -/* ************************************************************************* */ -double Point2::norm(OptionalJacobian<1,2> H) const { - return gtsam::norm2(*this, H); -} - -/* ************************************************************************* */ -double Point2::distance(const Point2& point, OptionalJacobian<1,2> H1, - OptionalJacobian<1,2> H2) const { - return gtsam::distance2(*this, point, H1, H2); -} - -/* ************************************************************************* */ -ostream &operator<<(ostream &os, const Point2& p) { - os << '(' << p.x() << ", " << p.y() << ')'; - return os; -} - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS - /* ************************************************************************* */ // Math inspired by http://paulbourke.net/geometry/circlesphere/ boost::optional circleCircleIntersection(double R_d, double r_d, diff --git a/gtsam/geometry/Point2.h b/gtsam/geometry/Point2.h index e186f7b67..e6574fe41 100644 --- a/gtsam/geometry/Point2.h +++ b/gtsam/geometry/Point2.h @@ -22,111 +22,9 @@ namespace gtsam { -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - - /// As of GTSAM 4, in order to make GTSAM more lean, - /// it is now possible to just typedef Point2 to Vector2 - typedef Vector2 Point2; - -#else - -/** - * A 2D point - * Complies with the Testable Concept - * Functional, so no set functions: once created, a point is constant. 
- * @addtogroup geometry - * \nosubgrouping - */ -class Point2 : public Vector2 { -private: - -public: - enum { dimension = 2 }; - /// @name Standard Constructors - /// @{ - - /// default constructor - Point2() {} - - using Vector2::Vector2; - - /// @} - /// @name Advanced Constructors - /// @{ - - /// construct from 2D vector - explicit Point2(const Vector2& v):Vector2(v) {} - /// @} - /// @name Testable - /// @{ - - /// print with optional string - GTSAM_EXPORT void print(const std::string& s = "") const; - - /// equals with an tolerance, prints out message if unequal - GTSAM_EXPORT bool equals(const Point2& q, double tol = 1e-9) const; - - /// @} - /// @name Group - /// @{ - - /// identity - inline static Point2 identity() {return Point2(0,0);} - - /// @} - /// @name Vector Space - /// @{ - - /** creates a unit vector */ - Point2 unit() const { return *this/norm(); } - - /** norm of point, with derivative */ - GTSAM_EXPORT double norm(OptionalJacobian<1,2> H = boost::none) const; - - /** distance between two points */ - GTSAM_EXPORT double distance(const Point2& p2, OptionalJacobian<1,2> H1 = boost::none, - OptionalJacobian<1,2> H2 = boost::none) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// equality - inline bool operator ==(const Point2& q) const {return x()==q.x() && y()==q.y();} - - /// get x - inline double x() const {return (*this)[0];} - - /// get y - inline double y() const {return (*this)[1];} - - /// return vectorized form (column-wise). 
- const Vector2& vector() const { return *this; } - - /// @} - - /// Streaming - GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point2& p); - - private: - /// @name Advanced Interface - /// @{ - - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) - { - ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector2);} - - /// @} -}; - -template<> -struct traits : public internal::VectorSpace { -}; - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS +/// As of GTSAM 4, in order to make GTSAM more lean, +/// it is now possible to just typedef Point2 to Vector2 +typedef Vector2 Point2; /// Distance of the point from the origin, with Jacobian GTSAM_EXPORT double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none); diff --git a/gtsam/geometry/Point3.cpp b/gtsam/geometry/Point3.cpp index 25fb9b92d..7a46f5988 100644 --- a/gtsam/geometry/Point3.cpp +++ b/gtsam/geometry/Point3.cpp @@ -22,47 +22,6 @@ using namespace std; namespace gtsam { -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -bool Point3::equals(const Point3 &q, double tol) const { - return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol && - std::abs(z() - q.z()) < tol); -} - -void Point3::print(const string& s) const { - cout << s << *this << endl; -} - -/* ************************************************************************* */ -double Point3::distance(const Point3 &q, OptionalJacobian<1, 3> H1, - OptionalJacobian<1, 3> H2) const { - return gtsam::distance3(*this,q,H1,H2); -} - -double Point3::norm(OptionalJacobian<1,3> H) const { - return gtsam::norm3(*this, H); -} - -Point3 Point3::normalized(OptionalJacobian<3,3> H) const { - return gtsam::normalize(*this, H); -} - -Point3 Point3::cross(const Point3 &q, OptionalJacobian<3, 3> H1, - OptionalJacobian<3, 3> H2) const { - return gtsam::cross(*this, q, H1, H2); -} - -double Point3::dot(const Point3 &q, OptionalJacobian<1, 3> H1, - 
OptionalJacobian<1, 3> H2) const { - return gtsam::dot(*this, q, H1, H2); -} - -/* ************************************************************************* */ -ostream &operator<<(ostream &os, const Point3& p) { - os << '[' << p.x() << ", " << p.y() << ", " << p.z() << "]'"; - return os; -} - -#endif /* ************************************************************************* */ double distance3(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1, OptionalJacobian<1, 3> H2) { diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h index 1ab5c313e..19e328022 100644 --- a/gtsam/geometry/Point3.h +++ b/gtsam/geometry/Point3.h @@ -29,106 +29,9 @@ namespace gtsam { -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - - /// As of GTSAM 4, in order to make GTSAM more lean, - /// it is now possible to just typedef Point3 to Vector3 - typedef Vector3 Point3; - -#else - -/** - * A 3D point is just a Vector3 with some additional methods - * @addtogroup geometry - * \nosubgrouping - */ -class Point3 : public Vector3 { - - public: - - enum { dimension = 3 }; - - /// @name Standard Constructors - /// @{ - - using Vector3::Vector3; - - /// @} - /// @name Testable - /// @{ - - /** print with optional string */ - GTSAM_EXPORT void print(const std::string& s = "") const; - - /** equals with an tolerance */ - GTSAM_EXPORT bool equals(const Point3& p, double tol = 1e-9) const; - - /// @} - /// @name Group - /// @{ - - /// identity for group operation - inline static Point3 identity() { return Point3(0.0, 0.0, 0.0); } - - /// @} - /// @name Vector Space - /// @{ - - /** distance between two points */ - GTSAM_EXPORT double distance(const Point3& p2, OptionalJacobian<1, 3> H1 = boost::none, - OptionalJacobian<1, 3> H2 = boost::none) const; - - /** Distance of the point from the origin, with Jacobian */ - GTSAM_EXPORT double norm(OptionalJacobian<1,3> H = boost::none) const; - - /** normalize, with optional Jacobian */ - GTSAM_EXPORT Point3 normalized(OptionalJacobian<3, 3> H = 
boost::none) const; - - /** cross product @return this x q */ - GTSAM_EXPORT Point3 cross(const Point3 &q, OptionalJacobian<3, 3> H_p = boost::none, // - OptionalJacobian<3, 3> H_q = boost::none) const; - - /** dot product @return this * q*/ - GTSAM_EXPORT double dot(const Point3 &q, OptionalJacobian<1, 3> H_p = boost::none, // - OptionalJacobian<1, 3> H_q = boost::none) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// return as Vector3 - const Vector3& vector() const { return *this; } - - /// get x - inline double x() const {return (*this)[0];} - - /// get y - inline double y() const {return (*this)[1];} - - /// get z - inline double z() const {return (*this)[2];} - - /// @} - - /// Output stream operator - GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point3& p); - - private: - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector3); - } - }; - -template<> -struct traits : public internal::VectorSpace {}; - -template<> -struct traits : public internal::VectorSpace {}; - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS +/// As of GTSAM 4, in order to make GTSAM more lean, +/// it is now possible to just typedef Point3 to Vector3 +typedef Vector3 Point3; // Convenience typedef typedef std::pair Point3Pair; diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 9c41a76c8..71df0f753 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -322,10 +322,10 @@ boost::optional align(const vector& pairs) { // calculate cos and sin double c=0,s=0; for(const Point2Pair& pair: pairs) { - Point2 dq = pair.first - cp; - Point2 dp = pair.second - cq; - c += dp.x() * dq.x() + dp.y() * dq.y(); - s += dp.y() * dq.x() - dp.x() * dq.y(); // this works but is negative from formula above !! 
:-( + Point2 dp = pair.first - cp; + Point2 dq = pair.second - cq; + c += dp.x() * dq.x() + dp.y() * dq.y(); + s += -dp.y() * dq.x() + dp.x() * dq.y(); } // calculate angle and translation diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index ad8f7f484..0a56e2ef5 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -430,7 +430,7 @@ boost::optional align(const vector& baPointPairs) { std::ostream &operator<<(std::ostream &os, const Pose3& pose) { // Both Rot3 and Point3 have ostream definitions so we use them. os << "R: " << pose.rotation() << "\n"; - os << "t: " << pose.translation(); + os << "t: " << pose.translation().transpose(); return os; } diff --git a/gtsam/geometry/Rot2.cpp b/gtsam/geometry/Rot2.cpp index 04ed16774..7502c4ccf 100644 --- a/gtsam/geometry/Rot2.cpp +++ b/gtsam/geometry/Rot2.cpp @@ -39,6 +39,13 @@ Rot2 Rot2::atan2(double y, double x) { return R.normalize(); } +/* ************************************************************************* */ +Rot2 Rot2::Random(std::mt19937& rng) { + uniform_real_distribution randomAngle(-M_PI, M_PI); + double angle = randomAngle(rng); + return fromAngle(angle); +} + /* ************************************************************************* */ void Rot2::print(const string& s) const { cout << s << ": " << theta() << endl; diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index f46f12540..8a361f558 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -22,6 +22,8 @@ #include #include +#include + namespace gtsam { /** @@ -79,6 +81,14 @@ namespace gtsam { /** Named constructor that behaves as atan2, i.e., y,x order (!) 
and normalizes */ static Rot2 atan2(double y, double x); + /** + * Random, generates random angle \in [-p,pi] + * Example: + * std::mt19937 engine(42); + * Unit3 unit = Unit3::Random(engine); + */ + static Rot2 Random(std::mt19937 & rng); + /// @} /// @name Testable /// @{ diff --git a/gtsam/geometry/SOn-inl.h b/gtsam/geometry/SOn-inl.h index 9edce8336..6180f4cc7 100644 --- a/gtsam/geometry/SOn-inl.h +++ b/gtsam/geometry/SOn-inl.h @@ -60,8 +60,9 @@ typename SO::TangentVector SO::ChartAtOrigin::Local(const SO& R, template typename SO::MatrixDD SO::AdjointMap() const { + if (N==2) return I_1x1; // SO(2) case throw std::runtime_error( - "SO::AdjointMap only implemented for SO3 and SO4."); + "SO::AdjointMap only implemented for SO2, SO3 and SO4."); } template @@ -84,26 +85,22 @@ typename SO::MatrixDD SO::LogmapDerivative(const TangentVector& omega) { throw std::runtime_error("O::LogmapDerivative only implemented for SO3."); } +// Default fixed size version (but specialized elsewehere for N=2,3,4) template typename SO::VectorN2 SO::vec( OptionalJacobian H) const { - const size_t n = rows(); - const size_t n2 = n * n; - // Vectorize - VectorN2 X(n2); - X << Eigen::Map(matrix_.data(), n2, 1); + VectorN2 X = Eigen::Map(matrix_.data()); // If requested, calculate H as (I \oplus Q) * P, // where Q is the N*N rotation matrix, and P is calculated below. if (H) { // Calculate P matrix of vectorized generators // TODO(duy): Should we refactor this as the jacobian of Hat? 
- Matrix P = VectorizedGenerators(n); - const size_t d = dim(); - H->resize(n2, d); - for (size_t i = 0; i < n; i++) { - H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + Matrix P = SO::VectorizedGenerators(); + for (size_t i = 0; i < N; i++) { + H->block(i * N, 0, N, dimension) = + matrix_ * P.block(i * N, 0, N, dimension); } } return X; diff --git a/gtsam/geometry/SOn.cpp b/gtsam/geometry/SOn.cpp index 37b6c1784..c6cff4214 100644 --- a/gtsam/geometry/SOn.cpp +++ b/gtsam/geometry/SOn.cpp @@ -22,21 +22,18 @@ namespace gtsam { template <> -GTSAM_EXPORT -Matrix SOn::Hat(const Vector& xi) { +GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref X) { size_t n = AmbientDim(xi.size()); - if (n < 2) throw std::invalid_argument("SO::Hat: n<2 not supported"); - - Matrix X(n, n); // allocate space for n*n skew-symmetric matrix - X.setZero(); - if (n == 2) { + if (n < 2) + throw std::invalid_argument("SO::Hat: n<2 not supported"); + else if (n == 2) { // Handle SO(2) case as recursion bottom assert(xi.size() == 1); X << 0, -xi(0), xi(0), 0; } else { // Recursively call SO(n-1) call for top-left block const size_t dmin = (n - 1) * (n - 2) / 2; - X.topLeftCorner(n - 1, n - 1) = Hat(xi.tail(dmin)); + Hat(xi.tail(dmin), X.topLeftCorner(n - 1, n - 1)); // determine sign of last element (signs alternate) double sign = pow(-1.0, xi.size()); @@ -47,7 +44,14 @@ Matrix SOn::Hat(const Vector& xi) { X(j, n - 1) = -X(n - 1, j); sign = -sign; } + X(n - 1, n - 1) = 0; // bottom-right } +} + +template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) { + size_t n = AmbientDim(xi.size()); + Matrix X(n, n); // allocate space for n*n skew-symmetric matrix + SOn::Hat(xi, X); return X; } @@ -99,4 +103,27 @@ SOn LieGroup::between(const SOn& g, DynamicJacobian H1, return result; } +// Dynamic version of vec +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const { + const size_t n = rows(), n2 = n * n; + + // Vectorize + VectorN2 X(n2); + X << 
Eigen::Map(matrix_.data(), n2, 1); + + // If requested, calculate H as (I \oplus Q) * P, + // where Q is the N*N rotation matrix, and P is calculated below. + if (H) { + // Calculate P matrix of vectorized generators + // TODO(duy): Should we refactor this as the jacobian of Hat? + Matrix P = SOn::VectorizedGenerators(n); + const size_t d = dim(); + H->resize(n2, d); + for (size_t i = 0; i < n; i++) { + H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + } + } + return X; +} + } // namespace gtsam diff --git a/gtsam/geometry/SOn.h b/gtsam/geometry/SOn.h index 004569416..86b6019e1 100644 --- a/gtsam/geometry/SOn.h +++ b/gtsam/geometry/SOn.h @@ -98,8 +98,8 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static SO Lift(size_t n, const Eigen::MatrixBase &R) { Matrix Q = Matrix::Identity(n, n); - size_t p = R.rows(); - assert(p < n && R.cols() == p); + const int p = R.rows(); + assert(p >= 0 && p <= static_cast(n) && R.cols() == p); Q.topLeftCorner(p, p) = R; return SO(Q); } @@ -208,7 +208,7 @@ class SO : public LieGroup, internal::DimensionSO(N)> { // Calculate run-time dimensionality of manifold. // Available as dimension or Dim() for fixed N. - size_t dim() const { return Dimension(matrix_.rows()); } + size_t dim() const { return Dimension(static_cast(matrix_.rows())); } /** * Hat operator creates Lie algebra element corresponding to d-vector, where d @@ -227,9 +227,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { */ static MatrixNN Hat(const TangentVector& xi); - /** - * Inverse of Hat. See note about xi element order in Hat. - */ + /// In-place version of Hat (see details there), implements recursion. + static void Hat(const Vector &xi, Eigen::Ref X); + + /// Inverse of Hat. See note about xi element order in Hat. 
static TangentVector Vee(const MatrixNN& X); // Chart at origin @@ -295,10 +296,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static Matrix VectorizedGenerators() { constexpr size_t N2 = static_cast(N * N); - Matrix G(N2, dimension); + Eigen::Matrix G; for (size_t j = 0; j < dimension; j++) { const auto X = Hat(Vector::Unit(dimension, j)); - G.col(j) = Eigen::Map(X.data(), N2, 1); + G.col(j) = Eigen::Map(X.data()); } return G; } @@ -362,6 +363,11 @@ template <> SOn LieGroup::between(const SOn& g, DynamicJacobian H1, DynamicJacobian H2) const; +/* + * Specialize dynamic vec. + */ +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const; + /** Serialization function */ template void serialize( diff --git a/gtsam/geometry/tests/testPoint2.cpp b/gtsam/geometry/tests/testPoint2.cpp index 8b9e8a7e6..6e4d408c7 100644 --- a/gtsam/geometry/tests/testPoint2.cpp +++ b/gtsam/geometry/tests/testPoint2.cpp @@ -237,16 +237,6 @@ TEST( Point2, circleCircleIntersection) { } -/* ************************************************************************* */ -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -TEST( Point2, stream) { - Point2 p(1, 2); - std::ostringstream os; - os << p; - EXPECT(os.str() == "(1, 2)"); -} -#endif - /* ************************************************************************* */ int main () { TestResult tr; diff --git a/gtsam/geometry/tests/testPoint3.cpp b/gtsam/geometry/tests/testPoint3.cpp index e2396f7e9..a7c2ac50c 100644 --- a/gtsam/geometry/tests/testPoint3.cpp +++ b/gtsam/geometry/tests/testPoint3.cpp @@ -153,16 +153,6 @@ TEST( Point3, cross2) { } } -/* ************************************************************************* */ -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -TEST( Point3, stream) { - Point3 p(1, 2, -3); - std::ostringstream os; - os << p; - EXPECT(os.str() == "[1, 2, -3]'"); -} -#endif - //************************************************************************* TEST (Point3, normalize) { Matrix actualH; diff 
--git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index 596fa8957..8586f35a0 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -857,19 +857,11 @@ TEST( Pose3, adjointTranspose) { } /* ************************************************************************* */ -TEST( Pose3, stream) -{ - Pose3 T; +TEST( Pose3, stream) { std::ostringstream os; - os << T; - - string expected; -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0\n0\n0";; -#else - expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: [0, 0, 0]'"; -#endif + os << Pose3(); + string expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0 0 0"; EXPECT(os.str() == expected); } @@ -1042,14 +1034,7 @@ TEST(Pose3, print) { // Add expected rotation expected << "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\n"; - -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - expected << "t: 1\n" - "2\n" - "3\n"; -#else - expected << "t: [" << translation.x() << ", " << translation.y() << ", " << translation.z() << "]'\n"; -#endif + expected << "t: 1 2 3\n"; // reset cout to the original stream std::cout.rdbuf(oldbuf); diff --git a/gtsam/geometry/tests/testSOn.cpp b/gtsam/geometry/tests/testSOn.cpp index 1cf8caed2..4d0ed98b3 100644 --- a/gtsam/geometry/tests/testSOn.cpp +++ b/gtsam/geometry/tests/testSOn.cpp @@ -39,8 +39,8 @@ using namespace std; using namespace gtsam; //****************************************************************************** -// Test dhynamic with n=0 -TEST(SOn, SO0) { +// Test dynamic with n=0 +TEST(SOn, SOn0) { const auto R = SOn(0); EXPECT_LONGS_EQUAL(0, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -50,7 +50,8 @@ TEST(SOn, SO0) { } //****************************************************************************** -TEST(SOn, SO5) { +// Test dynamic with n=5 +TEST(SOn, SOn5) { const auto R = SOn(5); EXPECT_LONGS_EQUAL(5, R.rows()); 
EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -59,6 +60,28 @@ TEST(SOn, SO5) { EXPECT_LONGS_EQUAL(10, traits::GetDimension(R)); } +//****************************************************************************** +// Test fixed with n=2 +TEST(SOn, SO0) { + const auto R = SO<2>(); + EXPECT_LONGS_EQUAL(2, R.rows()); + EXPECT_LONGS_EQUAL(1, SO<2>::dimension); + EXPECT_LONGS_EQUAL(1, SO<2>::Dim()); + EXPECT_LONGS_EQUAL(1, R.dim()); + EXPECT_LONGS_EQUAL(1, traits>::GetDimension(R)); +} + +//****************************************************************************** +// Test fixed with n=5 +TEST(SOn, SO5) { + const auto R = SO<5>(); + EXPECT_LONGS_EQUAL(5, R.rows()); + EXPECT_LONGS_EQUAL(10, SO<5>::dimension); + EXPECT_LONGS_EQUAL(10, SO<5>::Dim()); + EXPECT_LONGS_EQUAL(10, R.dim()); + EXPECT_LONGS_EQUAL(10, traits>::GetDimension(R)); +} + //****************************************************************************** TEST(SOn, Concept) { BOOST_CONCEPT_ASSERT((IsGroup)); @@ -105,29 +128,29 @@ TEST(SOn, HatVee) { EXPECT(assert_equal((Vector)v.head<1>(), SOn::Vee(actual2))); Matrix expected3(3, 3); - expected3 << 0, -3, 2, // - 3, 0, -1, // - -2, 1, 0; + expected3 << 0, -3, 2, // + 3, 0, -1, // + -2, 1, 0; const auto actual3 = SOn::Hat(v.head<3>()); EXPECT(assert_equal(expected3, actual3)); EXPECT(assert_equal(skewSymmetric(1, 2, 3), actual3)); EXPECT(assert_equal((Vector)v.head<3>(), SOn::Vee(actual3))); Matrix expected4(4, 4); - expected4 << 0, -6, 5, 3, // - 6, 0, -4, -2, // - -5, 4, 0, 1, // - -3, 2, -1, 0; + expected4 << 0, -6, 5, 3, // + 6, 0, -4, -2, // + -5, 4, 0, 1, // + -3, 2, -1, 0; const auto actual4 = SOn::Hat(v.head<6>()); EXPECT(assert_equal(expected4, actual4)); EXPECT(assert_equal((Vector)v.head<6>(), SOn::Vee(actual4))); Matrix expected5(5, 5); - expected5 << 0,-10, 9, 7, -4, // - 10, 0, -8, -6, 3, // - -9, 8, 0, 5, -2, // - -7, 6, -5, 0, 1, // - 4, -3, 2, -1, 0; + expected5 << 0, -10, 9, 7, -4, // + 10, 0, -8, -6, 3, // + -9, 8, 0, 5, -2, // 
+ -7, 6, -5, 0, 1, // + 4, -3, 2, -1, 0; const auto actual5 = SOn::Hat(v); EXPECT(assert_equal(expected5, actual5)); EXPECT(assert_equal((Vector)v, SOn::Vee(actual5))); @@ -159,6 +182,22 @@ TEST(SOn, RetractLocal) { CHECK(assert_equal(v1, SOn::ChartAtOrigin::Local(Q1), 1e-7)); } +//****************************************************************************** + +Matrix RetractJacobian(size_t n) { return SOn::VectorizedGenerators(n); } + +/// Test Jacobian of Retract at origin +TEST(SOn, RetractJacobian) { + Matrix actualH = RetractJacobian(3); + boost::function h = [](const Vector &v) { + return SOn::ChartAtOrigin::Retract(v).matrix(); + }; + Vector3 v; + v.setZero(); + const Matrix expectedH = numericalDerivative11(h, v, 1e-5); + CHECK(assert_equal(expectedH, actualH)); +} + //****************************************************************************** TEST(SOn, vec) { Vector10 v; @@ -166,11 +205,28 @@ TEST(SOn, vec) { SOn Q = SOn::ChartAtOrigin::Retract(v); Matrix actualH; const Vector actual = Q.vec(actualH); - boost::function h = [](const SOn& Q) { return Q.vec(); }; + boost::function h = [](const SOn &Q) { return Q.vec(); }; const Matrix H = numericalDerivative11(h, Q, 1e-5); CHECK(assert_equal(H, actualH)); } +//****************************************************************************** +TEST(SOn, VectorizedGenerators) { + // Default fixed + auto actual2 = SO<2>::VectorizedGenerators(); + CHECK(actual2.rows()==4 && actual2.cols()==1) + + // Specialized + auto actual3 = SO<3>::VectorizedGenerators(); + CHECK(actual3.rows()==9 && actual3.cols()==3) + auto actual4 = SO<4>::VectorizedGenerators(); + CHECK(actual4.rows()==16 && actual4.cols()==6) + + // Dynamic + auto actual5 = SOn::VectorizedGenerators(5); + CHECK(actual5.rows()==25 && actual5.cols()==10) +} + //****************************************************************************** int main() { TestResult tr; diff --git a/gtsam.h b/gtsam/gtsam.i similarity index 94% rename from gtsam.h rename to 
gtsam/gtsam.i index 1b4fc6e34..b9ecf6f3b 100644 --- a/gtsam.h +++ b/gtsam/gtsam.i @@ -618,6 +618,9 @@ class SOn { // Other methods Vector vec() const; Matrix matrix() const; + + // enabling serialization functionality + void serialize() const; }; #include @@ -2171,7 +2174,11 @@ class Values { // void insert(size_t j, const gtsam::Value& value); // void update(size_t j, const gtsam::Value& val); // gtsam::Value at(size_t j) const; - + + // The order is important: Vector has to precede Point2/Point3 so `atVector` + // can work for those fixed-size vectors. + void insert(size_t j, Vector vector); + void insert(size_t j, Matrix matrix); void insert(size_t j, const gtsam::Point2& point2); void insert(size_t j, const gtsam::Point3& point3); void insert(size_t j, const gtsam::Rot2& rot2); @@ -2188,8 +2195,6 @@ class Values { void insert(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera); void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void insert(size_t j, const gtsam::NavState& nav_state); - void insert(size_t j, Vector vector); - void insert(size_t j, Matrix matrix); void update(size_t j, const gtsam::Point2& point2); void update(size_t j, const gtsam::Point3& point3); @@ -2569,10 +2574,12 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor { void serialize() const; }; - - #include -template +template virtual class NonlinearEquality : gtsam::NoiseModelFactor { // Constructor - forces exact evaluation NonlinearEquality(size_t j, const T& feasible); @@ -2583,7 +2590,6 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor { void serialize() const; }; - #include template virtual class RangeFactor : gtsam::NoiseModelFactor { @@ -2796,28 +2802,48 @@ class SfmData { string findExampleDataFile(string name); pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise, bool smart); + gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise, bool smart); pair load2D(string filename, - 
gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise); + gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise); pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID); + gtsam::noiseModel::Diagonal* model, int maxIndex); pair load2D(string filename, gtsam::noiseModel::Diagonal* model); pair load2D(string filename); pair load2D_robust(string filename, - gtsam::noiseModel::Base* model); + gtsam::noiseModel::Base* model, int maxIndex); void save2D(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config, gtsam::noiseModel::Diagonal* model, string filename); +// std::vector::shared_ptr> +// Ignored by pybind -> will be List[BetweenFactorPose2] +class BetweenFactorPose2s +{ + BetweenFactorPose2s(); + size_t size() const; + gtsam::BetweenFactor* at(size_t i) const; + void push_back(const gtsam::BetweenFactor* factor); +}; +gtsam::BetweenFactorPose2s parse2DFactors(string filename); + // std::vector::shared_ptr> +// Ignored by pybind -> will be List[BetweenFactorPose3] class BetweenFactorPose3s { BetweenFactorPose3s(); size_t size() const; - gtsam::BetweenFactorPose3* at(size_t i) const; - void push_back(const gtsam::BetweenFactorPose3* factor); + gtsam::BetweenFactor* at(size_t i) const; + void push_back(const gtsam::BetweenFactor* factor); }; +gtsam::BetweenFactorPose3s parse3DFactors(string filename); + +pair load3D(string filename); + +pair readG2o(string filename); +pair readG2o(string filename, bool is3D); +void writeG2o(const gtsam::NonlinearFactorGraph& graph, + const gtsam::Values& estimate, string filename); #include class InitializePose3 { @@ -2839,14 +2865,6 @@ class InitializePose3 { static gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph); }; -gtsam::BetweenFactorPose3s parse3DFactors(string filename); -pair load3D(string filename); - -pair readG2o(string filename); -pair readG2o(string filename, bool is3D); -void writeG2o(const gtsam::NonlinearFactorGraph& graph, - const gtsam::Values& 
estimate, string filename); - #include template virtual class KarcherMeanFactor : gtsam::NonlinearFactor { @@ -2854,7 +2872,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor { }; #include -gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel( +gtsam::noiseModel::Isotropic* ConvertNoiseModel( gtsam::noiseModel::Base* model, size_t d); template @@ -2873,12 +2891,146 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor { Vector evaluateError(const T& R1, const T& R2); }; -virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor { - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, +#include + +virtual class ShonanFactor3 : gtsam::NoiseModelFactor { + ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12, size_t p); - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, - size_t p, gtsam::noiseModel::Base* model); - Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2); + ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12, + size_t p, gtsam::noiseModel::Base *model); + Vector evaluateError(const gtsam::SOn &Q1, const gtsam::SOn &Q2); +}; + +#include +template +class BinaryMeasurement { + BinaryMeasurement(size_t key1, size_t key2, const T& measured, + const gtsam::noiseModel::Base* model); + size_t key1() const; + size_t key2() const; + T measured() const; +}; + +typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; +typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; + +#include + +// TODO(frank): copy/pasta below until we have integer template paremeters in wrap! 
+ +class ShonanAveragingParameters2 { + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot2& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveragingParameters3 { + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot3& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveraging2 { + ShonanAveraging2(string g2oFile); + ShonanAveraging2(string g2oFile, + const gtsam::ShonanAveragingParameters2 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot2 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) 
const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + gtsam::Values initializeRandomlyAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + +class ShonanAveraging3 { + ShonanAveraging3(string g2oFile); + ShonanAveraging3(string g2oFile, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // TODO(frank): deprecate once we land pybind wrapper + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors); + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot3 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + 
gtsam::Values initializeRandomlyAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; }; //************************************************************************* @@ -3236,6 +3388,7 @@ namespace utilities { gtsam::KeySet createKeySet(string s, Vector I); Matrix extractPoint2(const gtsam::Values& values); Matrix extractPoint3(const gtsam::Values& values); + gtsam::Values allPose2s(gtsam::Values& values); Matrix extractPose2(const gtsam::Values& values); gtsam::Values allPose3s(gtsam::Values& values); Matrix extractPose3(const gtsam::Values& values); diff --git a/gtsam/inference/BayesTreeCliqueBase-inst.h b/gtsam/inference/BayesTreeCliqueBase-inst.h index a02fe274e..dfcc7c318 100644 --- a/gtsam/inference/BayesTreeCliqueBase-inst.h +++ b/gtsam/inference/BayesTreeCliqueBase-inst.h @@ -91,6 +91,7 @@ namespace gtsam { template size_t BayesTreeCliqueBase::numCachedSeparatorMarginals() const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (!cachedSeparatorMarginal_) return 0; @@ -144,6 +145,7 @@ namespace gtsam { typename BayesTreeCliqueBase::FactorGraphType BayesTreeCliqueBase::separatorMarginal( Eliminate function) const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); gttic(BayesTreeCliqueBase_separatorMarginal); // Check if the Separator 
marginal was already calculated if (!cachedSeparatorMarginal_) { @@ -206,6 +208,8 @@ namespace gtsam { // When a shortcut is requested, all of the shortcuts between it and the // root are also generated. So, if this clique's cached shortcut is set, // recursively call over all child cliques. Otherwise, it is unnecessary. + + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (cachedSeparatorMarginal_) { for(derived_ptr& child: children) { child->deleteCachedShortcuts(); diff --git a/gtsam/inference/BayesTreeCliqueBase.h b/gtsam/inference/BayesTreeCliqueBase.h index 7aca84635..7b79ccf68 100644 --- a/gtsam/inference/BayesTreeCliqueBase.h +++ b/gtsam/inference/BayesTreeCliqueBase.h @@ -24,6 +24,7 @@ #include #include +#include namespace gtsam { @@ -75,10 +76,28 @@ namespace gtsam { /** Construct from a conditional, leaving parent and child pointers uninitialized */ BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {} + /** Shallow copy constructor */ + BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {} + + /** Shallow copy assignment constructor */ + BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) { + conditional_ = c.conditional_; + parent_ = c.parent_; + children = c.children; + problemSize_ = c.problemSize_; + is_root = c.is_root; + return *this; + } + /// @} - /// This stores the Cached separator margnal P(S) + /// This stores the Cached separator marginal P(S) mutable boost::optional cachedSeparatorMarginal_; + /// This protects Cached seperator marginal P(S) from concurrent read/writes + /// as many the functions which access it are const (hence the mutable) + /// leading to the false impression that these const functions are thread-safe + /// which is not true due to these mutable values. This is fixed by applying this mutex. 
+ mutable std::mutex cachedSeparatorMarginalMutex_; public: sharedConditional conditional_; @@ -144,7 +163,9 @@ namespace gtsam { void deleteCachedShortcuts(); const boost::optional& cachedSeparatorMarginal() const { - return cachedSeparatorMarginal_; } + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + return cachedSeparatorMarginal_; + } friend class BayesTree; @@ -159,7 +180,10 @@ namespace gtsam { KeyVector shortcut_indices(const derived_ptr& B, const FactorGraphType& p_Cp_B) const; /** Non-recursive delete cached shortcuts and marginals - internal only. */ - void deleteCachedShortcutsNonRecursive() { cachedSeparatorMarginal_ = boost::none; } + void deleteCachedShortcutsNonRecursive() { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + cachedSeparatorMarginal_ = boost::none; + } private: diff --git a/gtsam/navigation/CombinedImuFactor.cpp b/gtsam/navigation/CombinedImuFactor.cpp index 49c666030..e41a8de44 100644 --- a/gtsam/navigation/CombinedImuFactor.cpp +++ b/gtsam/navigation/CombinedImuFactor.cpp @@ -167,10 +167,11 @@ gtsam::NonlinearFactor::shared_ptr CombinedImuFactor::clone() const { //------------------------------------------------------------------------------ void CombinedImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "CombinedImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) << "," - << keyFormatter(this->key6()) << ")\n"; + cout << (s == "" ? 
s : s + "\n") << "CombinedImuFactor(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << "," << keyFormatter(this->key4()) << "," + << keyFormatter(this->key5()) << "," << keyFormatter(this->key6()) + << ")\n"; _PIM_.print(" preintegrated measurements:"); this->noiseModel_->print(" noise model: "); } diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 55cc48eda..387353136 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -303,8 +303,8 @@ public: GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const CombinedImuFactor&); /// print - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /// equals bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; diff --git a/gtsam/navigation/ImuFactor.cpp b/gtsam/navigation/ImuFactor.cpp index ac7221cd9..cebddf05d 100644 --- a/gtsam/navigation/ImuFactor.cpp +++ b/gtsam/navigation/ImuFactor.cpp @@ -130,10 +130,10 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor& f) { //------------------------------------------------------------------------------ void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) - << ")\n"; + cout << (s == "" ? 
s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1()) + << "," << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) + << "," << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) + << ")\n"; cout << *this << endl; } @@ -226,9 +226,9 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor2& f) { //------------------------------------------------------------------------------ void ImuFactor2::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor2(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) - << ")\n"; + cout << (s == "" ? s : s + "\n") << "ImuFactor2(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << ")\n"; cout << *this << endl; } diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index 957478e97..51df3f24a 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -205,8 +205,8 @@ public: /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const ImuFactor&); - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// @} @@ -283,8 +283,8 @@ public: /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const ImuFactor2&); - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// @} diff --git a/gtsam/navigation/PreintegratedRotation.cpp 
b/gtsam/navigation/PreintegratedRotation.cpp index c5d48b734..f827c7c59 100644 --- a/gtsam/navigation/PreintegratedRotation.cpp +++ b/gtsam/navigation/PreintegratedRotation.cpp @@ -26,12 +26,11 @@ using namespace std; namespace gtsam { void PreintegratedRotationParams::print(const string& s) const { - cout << s << endl; + cout << (s == "" ? s : s + "\n") << endl; cout << "gyroscopeCovariance:\n[\n" << gyroscopeCovariance << "\n]" << endl; if (omegaCoriolis) cout << "omegaCoriolis = (" << omegaCoriolis->transpose() << ")" << endl; - if (body_P_sensor) - body_P_sensor->print("body_P_sensor"); + if (body_P_sensor) body_P_sensor->print("body_P_sensor"); } bool PreintegratedRotationParams::equals( diff --git a/gtsam/navigation/PreintegrationBase.cpp b/gtsam/navigation/PreintegrationBase.cpp index 45560f34d..111594663 100644 --- a/gtsam/navigation/PreintegrationBase.cpp +++ b/gtsam/navigation/PreintegrationBase.cpp @@ -46,7 +46,7 @@ ostream& operator<<(ostream& os, const PreintegrationBase& pim) { //------------------------------------------------------------------------------ void PreintegrationBase::print(const string& s) const { - cout << s << *this << endl; + cout << (s == "" ? 
s : s + "\n") << *this << endl; } //------------------------------------------------------------------------------ diff --git a/gtsam/navigation/PreintegrationBase.h b/gtsam/navigation/PreintegrationBase.h index 2f02a95b8..e118a6232 100644 --- a/gtsam/navigation/PreintegrationBase.h +++ b/gtsam/navigation/PreintegrationBase.h @@ -116,7 +116,7 @@ class GTSAM_EXPORT PreintegrationBase { /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const PreintegrationBase& pim); - virtual void print(const std::string& s) const; + virtual void print(const std::string& s="") const; /// @} /// @name Main functionality diff --git a/gtsam/navigation/tests/testNavState.cpp b/gtsam/navigation/tests/testNavState.cpp index c07240752..d38b76255 100644 --- a/gtsam/navigation/tests/testNavState.cpp +++ b/gtsam/navigation/tests/testNavState.cpp @@ -215,11 +215,7 @@ TEST(NavState, Stream) os << state; string expected; -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\np: 0\n0\n0\nv: 0\n0\n0"; -#else - expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\np: [0, 0, 0]'\nv: [0, 0, 0]'"; -#endif EXPECT(os.str() == expected); } diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 328a3facf..9a9c487b6 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -90,7 +90,7 @@ void NonlinearOptimizer::defaultOptimize() { // Iterative loop do { // Do next iteration - currentError = error(); + currentError = error(); // TODO(frank): don't do this twice at first !? Computed above! 
iterate(); tictoc_finishedIteration(); diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index aadbcf394..b49188b68 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -397,7 +397,7 @@ namespace gtsam { template size_t count() const { size_t i = 0; - for (const auto& key_value : *this) { + for (const auto key_value : *this) { if (dynamic_cast*>(&key_value.value)) ++i; } diff --git a/gtsam/nonlinear/utilities.h b/gtsam/nonlinear/utilities.h index a78f7a2d0..ffc279872 100644 --- a/gtsam/nonlinear/utilities.h +++ b/gtsam/nonlinear/utilities.h @@ -107,6 +107,11 @@ Matrix extractPoint3(const Values& values) { return result; } +/// Extract all Pose3 values +Values allPose2s(const Values& values) { + return values.filter(); +} + /// Extract all Pose2 values into a single matrix [x y theta] Matrix extractPose2(const Values& values) { Values::ConstFiltered poses = values.filter(); diff --git a/gtsam/sam/BearingRangeFactor.h b/gtsam/sam/BearingRangeFactor.h index af7b47446..482dbb974 100644 --- a/gtsam/sam/BearingRangeFactor.h +++ b/gtsam/sam/BearingRangeFactor.h @@ -42,14 +42,21 @@ class BearingRangeFactor public: typedef boost::shared_ptr shared_ptr; - /// default constructor + /// Default constructor BearingRangeFactor() {} - /// primary constructor - BearingRangeFactor(Key key1, Key key2, const B& measuredBearing, - const R& measuredRange, const SharedNoiseModel& model) - : Base({key1, key2}, model, T(measuredBearing, measuredRange)) { - this->initialize(expression({key1, key2})); + /// Construct from BearingRange instance + BearingRangeFactor(Key key1, Key key2, const T &bearingRange, + const SharedNoiseModel &model) + : Base({{key1, key2}}, model, T(bearingRange)) { + this->initialize(expression({{key1, key2}})); + } + + /// Construct from separate bearing and range + BearingRangeFactor(Key key1, Key key2, const B &measuredBearing, + const R &measuredRange, const SharedNoiseModel &model) + : Base({{key1, key2}}, model, 
T(measuredBearing, measuredRange)) { + this->initialize(expression({{key1, key2}})); } virtual ~BearingRangeFactor() {} diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h new file mode 100644 index 000000000..c525c1b9e --- /dev/null +++ b/gtsam/sfm/BinaryMeasurement.h @@ -0,0 +1,83 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +#pragma once + +/** + * @file BinaryMeasurement.h + * @author Akshay Krishnan + * @date July 2020 + * @brief Binary measurement represents a measurement between two keys in a + * graph. A binary measurement is similar to a BetweenFactor, except that it + * does not contain an error function. It is a measurement (along with a noise + * model) from one key to another. Note that the direction is important. A + * measurement from key1 to key2 is not the same as the same measurement from + * key2 to key1. 
+ */ + +#include +#include +#include +#include + +#include +#include + +namespace gtsam { + +template class BinaryMeasurement : public Factor { + // Check that T type is testable + BOOST_CONCEPT_ASSERT((IsTestable)); + +public: + // shorthand for a smart pointer to a measurement + using shared_ptr = typename boost::shared_ptr; + +private: + T measured_; ///< The measurement + SharedNoiseModel noiseModel_; ///< Noise model + +public: + BinaryMeasurement(Key key1, Key key2, const T &measured, + const SharedNoiseModel &model = nullptr) + : Factor(std::vector({key1, key2})), measured_(measured), + noiseModel_(model) {} + + /// @name Standard Interface + /// @{ + + Key key1() const { return keys_[0]; } + Key key2() const { return keys_[1]; } + const T &measured() const { return measured_; } + const SharedNoiseModel &noiseModel() const { return noiseModel_; } + + /// @} + /// @name Testable + /// @{ + + void print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const { + std::cout << s << "BinaryMeasurement(" << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n"; + traits::Print(measured_, " measured: "); + this->noiseModel_->print(" noise model: "); + } + + bool equals(const BinaryMeasurement &expected, double tol = 1e-9) const { + const BinaryMeasurement *e = + dynamic_cast *>(&expected); + return e != nullptr && Factor::equals(*e) && + traits::Equals(this->measured_, e->measured_, tol) && + noiseModel_->equals(*expected.noiseModel()); + } + /// @} +}; +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp new file mode 100644 index 000000000..2485418cf --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.cpp @@ -0,0 +1,854 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank 
Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.cpp + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +// In Wrappers we have no access to this so have a default ready +static std::mt19937 kRandomNumberGenerator(42); + +using Sparse = Eigen::SparseMatrix; + +/* ************************************************************************* */ +template +ShonanAveragingParameters::ShonanAveragingParameters( + const LevenbergMarquardtParams &_lm, const std::string &method, + double optimalityThreshold, double alpha, double beta, double gamma) + : lm(_lm), optimalityThreshold(optimalityThreshold), alpha(alpha), + beta(beta), gamma(gamma) { + // By default, we will do conjugate gradient + lm.linearSolverType = LevenbergMarquardtParams::Iterative; + + // Create subgraph builder parameters + SubgraphBuilderParameters builderParameters; + builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL; + builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL; + builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON; + builderParameters.augmentationFactor = 0.0; + + auto pcg = boost::make_shared(); + + // Choose optimization method + if (method == "SUBGRAPH") { + lm.iterativeParams = + boost::make_shared(builderParameters); + } else if (method == "SGPC") { + pcg->preconditioner_ = + boost::make_shared(builderParameters); + lm.iterativeParams = pcg; + } else if (method == "JACOBI") { + pcg->preconditioner_ = + boost::make_shared(); + lm.iterativeParams = pcg; + } else if (method == "QR") { 
+ lm.setLinearSolverType("MULTIFRONTAL_QR"); + } else if (method == "CHOLESKY") { + lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY"); + } else { + throw std::invalid_argument("ShonanAveragingParameters: unknown method"); + } +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template struct ShonanAveragingParameters<2>; +template struct ShonanAveragingParameters<3>; + +/* ************************************************************************* */ +// Calculate number of unknown rotations referenced by measurements +template +static size_t +NrUnknowns(const typename ShonanAveraging::Measurements &measurements) { + std::set keys; + for (const auto &measurement : measurements) { + keys.insert(measurement.key1()); + keys.insert(measurement.key2()); + } + return keys.size(); +} + +/* ************************************************************************* */ +template +ShonanAveraging::ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters) + : parameters_(parameters), measurements_(measurements), + nrUnknowns_(NrUnknowns(measurements)) { + for (const auto &measurement : measurements_) { + const auto &model = measurement.noiseModel(); + if (model && model->dim() != SO::dimension) { + measurement.print("Factor with incorrect noise model:\n"); + throw std::invalid_argument("ShonanAveraging: measurements passed to " + "constructor have incorrect dimension."); + } + } + Q_ = buildQ(); + D_ = buildD(); + L_ = D_ - Q_; +} + +/* ************************************************************************* */ +template +NonlinearFactorGraph ShonanAveraging::buildGraphAt(size_t p) const { + NonlinearFactorGraph graph; + auto G = boost::make_shared(SO<-1>::VectorizedGenerators(p)); + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + 
graph.emplace_shared>(keys[0], keys[1], Rij, p, model, G); + } + + // Possibly add Karcher prior + if (parameters_.beta > 0) { + const size_t dim = SOn::Dimension(p); + graph.emplace_shared>(graph.keys(), dim); + } + + // Possibly add gauge factors - they are probably useless as gradient is zero + if (parameters_.gamma > 0 && p > d + 1) { + for (auto key : graph.keys()) + graph.emplace_shared(key, p, d, parameters_.gamma); + } + + return graph; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::costAt(size_t p, const Values &values) const { + const NonlinearFactorGraph graph = buildGraphAt(p); + return graph.error(values); +} + +/* ************************************************************************* */ +template +boost::shared_ptr +ShonanAveraging::createOptimizerAt(size_t p, const Values &initial) const { + // Build graph + NonlinearFactorGraph graph = buildGraphAt(p); + + // Anchor prior is added here as depends on initial value (and cost is zero) + if (parameters_.alpha > 0) { + size_t i; + Rot value; + const size_t dim = SOn::Dimension(p); + std::tie(i, value) = parameters_.anchor; + auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha); + graph.emplace_shared>(i, SOn::Lift(p, value.matrix()), + model); + } + + // Optimize + return boost::make_shared(graph, initial, + parameters_.lm); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::tryOptimizingAt(size_t p, + const Values &initial) const { + auto lm = createOptimizerAt(p, initial); + return lm->optimize(); +} + +/* ************************************************************************* */ +// Project to pxdN Stiefel manifold +template +Matrix ShonanAveraging::StiefelElementMatrix(const Values &values) { + const size_t N = values.size(); + const size_t p = values.at(0).rows(); + Matrix S(p, N * d); + for (const auto it : values.filter()) { + 
S.middleCols(it.key * d) = + it.value.matrix().leftCols(); // project Qj to Stiefel + } + return S; +} + +/* ************************************************************************* */ +template <> +Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0)); + result.insert(it.key, R); + } + return result; +} + +template <> +Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>()); + result.insert(it.key, R); + } + return result; +} + +/* ************************************************************************* */ +template static Matrix RoundSolutionS(const Matrix &S) { + const size_t N = S.cols() / d; + // First, compute a thin SVD of S + Eigen::JacobiSVD svd(S, Eigen::ComputeThinV); + const Vector sigmas = svd.singularValues(); + + // Construct a diagonal matrix comprised of the first d singular values + using DiagonalMatrix = Eigen::DiagonalMatrix; + DiagonalMatrix Sigma_d; + Sigma_d.diagonal() = sigmas.head(); + + // Now, construct a rank-d truncated singular value decomposition for S + Matrix R = Sigma_d * svd.matrixV().leftCols().transpose(); + + // Count the number of blocks whose determinants have positive sign + size_t numPositiveBlocks = 0; + for (size_t i = 0; i < N; ++i) { + // Compute the determinant of the ith dxd block of R + double determinant = R.middleCols(d * i).determinant(); + if (determinant > 0) + ++numPositiveBlocks; + } + + if (numPositiveBlocks < N / 2) { + // Less than half of the total number of blocks have the correct sign. + // To reverse their orientations, multiply with a reflection matrix. 
+ DiagonalMatrix reflector; + reflector.setIdentity(); + reflector.diagonal()(d - 1) = -1; + R = reflector * R; + } + + return R; +} + +/* ************************************************************************* */ +template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const { + // Round to a 2*2N matrix + Matrix R = RoundSolutionS<2>(S); + + // Finally, project each dxd rotation block to SO(2) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j)); + values.insert(j, Ri); + } + return values; +} + +template <> Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const { + // Round to a 3*3N matrix + Matrix R = RoundSolutionS<3>(S); + + // Finally, project each dxd rotation block to SO(3) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j)); + values.insert(j, Ri); + } + return values; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::roundSolution(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. 
+ return roundSolutionS(S); +} + +/* ************************************************************************* */ +template +double ShonanAveraging::cost(const Values &values) const { + NonlinearFactorGraph graph; + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>>( + keys[0], keys[1], SO(Rij.matrix()), model); + } + // Finally, project each dxd rotation block to SO(d) + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SO(it.value.matrix())); + } + return graph.error(result); +} + +/* ************************************************************************* */ +// Get kappa from noise model +template +static double Kappa(const BinaryMeasurement &measurement) { + const auto &isotropic = boost::dynamic_pointer_cast( + measurement.noiseModel()); + if (!isotropic) { + throw std::invalid_argument( + "Shonan averaging noise models must be isotropic."); + } + const double sigma = isotropic->sigma(); + return 1.0 / (sigma * sigma); +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildD() const { + // Each measurement contributes 2*d elements along the diagonal of the + // degree matrix. 
+ static constexpr size_t stride = 2 * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t k = 0; k < d; k++) { + // Elements of ith block-diagonal + triplets.emplace_back(di + k, di + k, kappa); + // Elements of jth block-diagonal + triplets.emplace_back(dj + k, dj + k, kappa); + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse D(dN, dN); + D.setFromTriplets(triplets.begin(), triplets.end()); + + return D; +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildQ() const { + // Each measurement contributes 2*d^2 elements on a pair of symmetric + // off-diagonal blocks + static constexpr size_t stride = 2 * d * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Extract rotation measurement + const auto Rij = measurement.measured().matrix(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t r = 0; r < d; r++) { + for (size_t c = 0; c < d; c++) { + // Elements of ij block + triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c)); + // Elements of ji block + triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r)); + } + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse Q(dN, dN); + Q.setFromTriplets(triplets.begin(), triplets.end()); + + return Q; +} + +/* 
************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Matrix &S) const { + // Each pose contributes 2*d elements along the diagonal of Lambda + static constexpr size_t stride = d * d; + + // Reserve space for triplets + const size_t N = nrUnknowns(); + std::vector> triplets; + triplets.reserve(stride * N); + + // Do sparse-dense multiply to get Q*S' + auto QSt = Q_ * S.transpose(); + + for (size_t j = 0; j < N; j++) { + // Compute B, the building block for the j^th diagonal block of Lambda + const size_t dj = d * j; + Matrix B = QSt.middleRows(dj, d) * S.middleCols(dj); + + // Elements of jth block-diagonal + for (size_t r = 0; r < d; r++) + for (size_t c = 0; c < d; c++) + triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r))); + } + + // Construct and return a sparse matrix from these triplets + Sparse Lambda(d * N, d * N); + Lambda.setFromTriplets(triplets.begin(), triplets.end()); + return Lambda; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. 
+ return computeLambda(S); +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeA(const Values &values) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +/// MINIMUM EIGENVALUE COMPUTATIONS + +/** This is a lightweight struct used in conjunction with Spectra to compute + * the minimum eigenvalue and eigenvector of a sparse matrix A; it has a single + * nontrivial function, perform_op(x,y), that computes and returns the product + * y = (A + sigma*I) x */ +struct MatrixProdFunctor { + // Const reference to an externally-held matrix whose minimum-eigenvalue we + // want to compute + const Sparse &A_; + + // Spectral shift + double sigma_; + + // Constructor + explicit MatrixProdFunctor(const Sparse &A, double sigma = 0) + : A_(A), sigma_(sigma) {} + + int rows() const { return A_.rows(); } + int cols() const { return A_.cols(); } + + // Matrix-vector multiplication operation + void perform_op(const double *x, double *y) const { + // Wrap the raw arrays as Eigen Vector types + Eigen::Map X(x, rows()); + Eigen::Map Y(y, rows()); + + // Do the multiplication using wrapped Eigen vectors + Y = A_ * X + sigma_ * X; + } +}; + +/// Function to compute the minimum eigenvalue of A using Lanczos in Spectra. 
+/// This does 2 things: +/// +/// (1) Quick (coarse) eigenvalue computation to estimate the largest-magnitude +/// eigenvalue (2) A second eigenvalue computation applied to A-sigma*I, where +/// sigma is chosen to make the minimum eigenvalue of A the extremal eigenvalue +/// of A-sigma*I +/// +/// Upon completion, this returns a boolean value indicating whether the minimum +/// eigenvalue was computed to the required precision -- if so, its sets the +/// values of minEigenValue and minEigenVector appropriately + +/// Note that in the following function signature, S is supposed to be the +/// block-row-matrix that is a critical point for the optimization algorithm; +/// either S (Stiefel manifold) or R (block rotations). We use this to +/// construct a starting vector v for the Lanczos process that will be close to +/// the minimum eigenvector we're looking for whenever the relaxation is exact +/// -- this is a key feature that helps to make this method fast. Note that +/// instead of passing in all of S, it would be enough to pass in one of S's +/// *rows*, if that's more convenient. + +// For the defaults, David Rosen says: +// - maxIterations refers to the max number of Lanczos iterations to run; +// ~1000 should be sufficiently large +// - We've been using 10^-4 for the nonnegativity tolerance +// - for numLanczosVectors, 20 is a good default value + +static bool +SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue, + Vector *minEigenVector = 0, size_t *numIterations = 0, + size_t maxIterations = 1000, + double minEigenvalueNonnegativityTolerance = 10e-4, + Eigen::Index numLanczosVectors = 20) { + // a. 
Estimate the largest-magnitude eigenvalue of this matrix using Lanczos + MatrixProdFunctor lmOperator(A); + Spectra::SymEigsSolver + lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows())); + lmEigenValueSolver.init(); + + const int lmConverged = lmEigenValueSolver.compute( + maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + // Check convergence and bail out if necessary + if (lmConverged != 1) + return false; + + const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0); + + if (lmEigenValue < 0) { + // The largest-magnitude eigenvalue is negative, and therefore also the + // minimum eigenvalue, so just return this solution + *minEigenValue = lmEigenValue; + if (minEigenVector) { + *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + return true; + } + + // The largest-magnitude eigenvalue is positive, and is therefore the + // maximum eigenvalue. Therefore, after shifting the spectrum of A + // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted + // spectrum will lie in the interval [minEigenValue(A) - 2* lambda_max(A), + // -lambda_max*A]; in particular, the largest-magnitude eigenvalue of + // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding + // eigenvector v_min + + MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue); + + Spectra::SymEigsSolver + minEigenValueSolver(&minShiftedOperator, 1, + std::min(numLanczosVectors, A.rows())); + + // If S is a critical point of F, then S^T is also in the null space of S - + // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are + // eigenvectors corresponding to the eigenvalue 0. In the case that the + // relaxation is exact, this is the *minimum* eigenvalue, and therefore the + // rows of S are exactly the eigenvectors that we're looking for. 
On the + // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at + // least one strictly negative eigenvalue, and the rows of S are *unstable + // fixed points* for the Lanczos iterations. Thus, we will take a slightly + // "fuzzed" version of the first row of S as an initialization for the + // Lanczos iterations; this allows for rapid convergence in the case that + // the relaxation is exact (since are starting close to a solution), while + // simultaneously allowing the iterations to escape from this fixed point in + // the case that the relaxation is not exact. + Vector v0 = S.row(0).transpose(); + Vector perturbation(v0.size()); + perturbation.setRandom(); + perturbation.normalize(); + Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3% + + // Use this to initialize the eigensolver + minEigenValueSolver.init(xinit.data()); + + // Now determine the relative precision required in the Lanczos method in + // order to be able to estimate the smallest eigenvalue within an *absolute* + // tolerance of 'minEigenvalueNonnegativityTolerance' + const int minConverged = minEigenValueSolver.compute( + maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue, + Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + if (minConverged != 1) + return false; + + *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue; + if (minEigenVector) { + *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + if (numIterations) + *numIterations = minEigenValueSolver.num_iterations(); + return true; +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::computeA(const Matrix &S) const { + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::computeMinEigenValue(const 
Values &values, + Vector *minEigenVector) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto A = computeA(S); + + double minEigenValue; + bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector); + if (!success) { + throw std::runtime_error( + "SparseMinimumEigenValue failed to compute minimum eigenvalue."); + } + return minEigenValue; +} + +/* ************************************************************************* */ +template +std::pair +ShonanAveraging::computeMinEigenVector(const Values &values) const { + Vector minEigenVector; + double minEigenValue = computeMinEigenValue(values, &minEigenVector); + return std::make_pair(minEigenValue, minEigenVector); +} + +/* ************************************************************************* */ +template +bool ShonanAveraging::checkOptimality(const Values &values) const { + double minEigenValue = computeMinEigenValue(values); + return minEigenValue > parameters_.optimalityThreshold; +} + +/* ************************************************************************* */ +/// Create a tangent direction xi with eigenvector segment v_i +template +Vector ShonanAveraging::MakeATangentVector(size_t p, const Vector &v, + size_t i) { + // Create a tangent direction xi with eigenvector segment v_i + const size_t dimension = SOn::Dimension(p); + const auto v_i = v.segment(d * i); + Vector xi = Vector::Zero(dimension); + double sign = pow(-1.0, round((p + 1) / 2) + 1); + for (size_t j = 0; j < d; j++) { + xi(j + p - d - 1) = sign * v_i(d - j - 1); + sign = -sign; + } + return xi; +} + +/* ************************************************************************* */ +template +Matrix ShonanAveraging::riemannianGradient(size_t p, + const Values &values) const { + Matrix S_dot = StiefelElementMatrix(values); + // calculate the gradient of F(Q_dot) at Q_dot + Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose(); + // cout << "euclidean gradient 
rows and cols" << euclideanGradient.rows() << + // "\t" << euclideanGradient.cols() << endl; + + // project the gradient onto the entire euclidean space + Matrix symBlockDiagProduct(p, d * nrUnknowns()); + for (size_t i = 0; i < nrUnknowns(); i++) { + // Compute block product + const size_t di = d * i; + const Matrix P = S_dot.middleCols(di).transpose() * + euclideanGradient.middleCols(di); + // Symmetrize this block + Matrix S = .5 * (P + P.transpose()); + // Compute S_dot * S and set corresponding block + symBlockDiagProduct.middleCols(di) = S_dot.middleCols(di) * S; + } + Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct; + return riemannianGradient; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector) { + Values lifted = LiftTo(p, values); + for (auto it : lifted.filter()) { + // Create a tangent direction xi with eigenvector segment v_i + // Assumes key is 0-based integer + const Vector xi = MakeATangentVector(p, minEigenVector, it.key); + // Move the old value in the descent direction + it.value = it.value.retract(xi); + } + return lifted; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeWithDescent( + size_t p, const Values &values, const Vector &minEigenVector, + double minEigenValue, double gradienTolerance, + double preconditionedGradNormTolerance) const { + double funcVal = costAt(p - 1, values); + double alphaMin = 1e-2; + double alpha = + std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue)); + vector alphas; + vector fvals; + // line search + while ((alpha >= alphaMin)) { + Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector); + double funcValTest = costAt(p, Qplus); + Matrix gradTest = riemannianGradient(p, Qplus); + double gradTestNorm = gradTest.norm(); + // Record alpha and 
funcVal + alphas.push_back(alpha); + fvals.push_back(funcValTest); + if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) { + return Qplus; + } + alpha /= 2; + } + + auto fminIter = min_element(fvals.begin(), fvals.end()); + auto minIdx = distance(fvals.begin(), fminIter); + double fMin = fvals[minIdx]; + double aMin = alphas[minIdx]; + if (fMin < funcVal) { + Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector); + return Qplus; + } + + return LiftwithDescent(p, values, alpha * minEigenVector); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomly(std::mt19937 &rng) const { + Values initial; + for (size_t j = 0; j < nrUnknowns(); j++) { + initial.insert(j, Rot::Random(rng)); + } + return initial; +} + +/* ************************************************************************* */ +template Values ShonanAveraging::initializeRandomly() const { + return initializeRandomly(kRandomNumberGenerator); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomlyAt(size_t p, + std::mt19937 &rng) const { + const Values randomRotations = initializeRandomly(rng); + return LiftTo(p, randomRotations); // lift to p! +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomlyAt(size_t p) const { + return initializeRandomlyAt(p, kRandomNumberGenerator); +} + +/* ************************************************************************* */ +template +std::pair ShonanAveraging::run(const Values &initialEstimate, + size_t pMin, + size_t pMax) const { + Values Qstar; + Values initialSOp = LiftTo(pMin, initialEstimate); // lift to pMin! 
+ for (size_t p = pMin; p <= pMax; p++) { + // Optimize until convergence at this level + Qstar = tryOptimizingAt(p, initialSOp); + + // Check certificate of global optimzality + Vector minEigenVector; + double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector); + if (minEigenValue > parameters_.optimalityThreshold) { + // If at global optimum, round and return solution + const Values SO3Values = roundSolution(Qstar); + return std::make_pair(SO3Values, minEigenValue); + } + + // Not at global optimimum yet, so check whether we will go to next level + if (p != pMax) { + // Calculate initial estimate for next level by following minEigenVector + initialSOp = + initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue); + } + } + throw std::runtime_error("Shonan::run did not converge for given pMax"); +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 +template class ShonanAveraging<2>; + +ShonanAveraging2::ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters) + : ShonanAveraging<2>(measurements, parameters) {} +ShonanAveraging2::ShonanAveraging2(string g2oFile, const Parameters ¶meters) + : ShonanAveraging<2>(parseMeasurements(g2oFile), parameters) {} + +/* ************************************************************************* */ +// Explicit instantiation for d=3 +template class ShonanAveraging<3>; + +ShonanAveraging3::ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters) + : ShonanAveraging<3>(measurements, parameters) {} + +ShonanAveraging3::ShonanAveraging3(string g2oFile, const Parameters ¶meters) + : ShonanAveraging<3>(parseMeasurements(g2oFile), parameters) {} + +// TODO(frank): Deprecate after we land pybind wrapper + +// Extract Rot3 measurement from Pose3 betweenfactors +// Modeled after similar function in dataset.cpp +static BinaryMeasurement +convert(const BetweenFactor::shared_ptr &f) { + auto gaussian = + 
boost::dynamic_pointer_cast(f->noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose3 measurements " + "with Gaussian noise models."); + const Matrix6 M = gaussian->covariance(); + return BinaryMeasurement( + f->key1(), f->key2(), f->measured().rotation(), + noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true)); +} + +static ShonanAveraging3::Measurements +extractRot3Measurements(const BetweenFactorPose3s &factors) { + ShonanAveraging3::Measurements result; + result.reserve(factors.size()); + for (auto f : factors) + result.push_back(convert(f)); + return result; +} + +ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters) + : ShonanAveraging<3>(extractRot3Measurements(factors), parameters) {} + +/* ************************************************************************* */ +} // namespace gtsam diff --git a/gtsam/sfm/ShonanAveraging.h b/gtsam/sfm/ShonanAveraging.h new file mode 100644 index 000000000..ed94329a2 --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.h @@ -0,0 +1,365 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.h + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace gtsam { +class NonlinearFactorGraph; +class LevenbergMarquardtOptimizer; + +/// Parameters governing optimization etc. 
+template <size_t d> struct GTSAM_EXPORT ShonanAveragingParameters {
+  // Select Rot2 or Rot3 interface based template parameter d
+  using Rot = typename std::conditional<d == 2, Rot2, Rot3>::type;
+  using Anchor = std::pair<size_t, Rot>;
+
+  // Parameters themselves:
+  LevenbergMarquardtParams lm; // LM parameters
+  double optimalityThreshold;  // threshold used in checkOptimality
+  Anchor anchor;               // pose to use as anchor if not Karcher
+  double alpha;                // weight of anchor-based prior (default 0)
+  double beta;                 // weight of Karcher-based prior (default 1)
+  double gamma;                // weight of gauge-fixing factors (default 0)
+
+  ShonanAveragingParameters(const LevenbergMarquardtParams &lm =
+                                LevenbergMarquardtParams::CeresDefaults(),
+                            const std::string &method = "JACOBI",
+                            double optimalityThreshold = -1e-4,
+                            double alpha = 0.0, double beta = 1.0,
+                            double gamma = 0.0);
+
+  LevenbergMarquardtParams getLMParams() const { return lm; }
+
+  void setOptimalityThreshold(double value) { optimalityThreshold = value; }
+  double getOptimalityThreshold() const { return optimalityThreshold; }
+
+  void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; }
+
+  void setAnchorWeight(double value) { alpha = value; }
+  double getAnchorWeight() { return alpha; }
+
+  void setKarcherWeight(double value) { beta = value; }
+  double getKarcherWeight() { return beta; }
+
+  void setGaugesWeight(double value) { gamma = value; }
+  double getGaugesWeight() { return gamma; }
+};
+
+using ShonanAveragingParameters2 = ShonanAveragingParameters<2>;
+using ShonanAveragingParameters3 = ShonanAveragingParameters<3>;
+
+/**
+ * Class that implements Shonan Averaging from our ECCV'20 paper.
+ * Note: The "basic" API uses all Rot values (Rot2 or Rot3, depending on value
+ * of d), whereas the different levels and "advanced" API at SO(p) needs Values
+ * of type SOn.
+ *
+ * The template parameter d can be 2 or 3.
+ * Both are specialized in the .cpp file.
+ * + * If you use this code in your work, please consider citing our paper: + * Shonan Rotation Averaging, Global Optimality by Surfing SO(p)^n + * Frank Dellaert, David M. Rosen, Jing Wu, Robert Mahony, and Luca Carlone, + * European Computer Vision Conference, 2020. + * You can view our ECCV spotlight video at https://youtu.be/5ppaqMyHtE0 + */ +template class GTSAM_EXPORT ShonanAveraging { +public: + using Sparse = Eigen::SparseMatrix; + + // Define the Parameters type and use its typedef of the rotation type: + using Parameters = ShonanAveragingParameters; + using Rot = typename Parameters::Rot; + + // We store SO(d) BetweenFactors to get noise model + // TODO(frank): use BinaryMeasurement? + using Measurements = std::vector>; + +private: + Parameters parameters_; + Measurements measurements_; + size_t nrUnknowns_; + Sparse D_; // Sparse (diagonal) degree matrix + Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr + Sparse L_; // connection Laplacian L = D - Q, needed for optimality check + + /** + * Build 3Nx3N sparse matrix consisting of rotation measurements, arranged as + * (i,j) and (j,i) blocks within a sparse matrix. + */ + Sparse buildQ() const; + + /// Build 3Nx3N sparse degree matrix D + Sparse buildD() const; + +public: + /// @name Standard Constructors + /// @{ + + /// Construct from set of relative measurements (given as BetweenFactor + /// for now) NoiseModel *must* be isotropic. + ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + + /// @} + /// @name Query properties + /// @{ + + /// Return number of unknowns + size_t nrUnknowns() const { return nrUnknowns_; } + + /// Return number of measurements + size_t nrMeasurements() const { return measurements_.size(); } + + /// k^th binary measurement + const BinaryMeasurement &measurement(size_t k) const { + return measurements_[k]; + } + + /// k^th measurement, as a Rot. 
+ const Rot &measured(size_t k) const { return measurements_[k].measured(); } + + /// Keys for k^th measurement, as a vector of Key values. + const KeyVector &keys(size_t k) const { return measurements_[k].keys(); } + + /// @} + /// @name Matrix API (advanced use, debugging) + /// @{ + + Sparse D() const { return D_; } ///< Sparse version of D + Matrix denseD() const { return Matrix(D_); } ///< Dense version of D + Sparse Q() const { return Q_; } ///< Sparse version of Q + Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q + Sparse L() const { return L_; } ///< Sparse version of L + Matrix denseL() const { return Matrix(L_); } ///< Dense version of L + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeLambda(const Matrix &S) const; + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Values &values) const { + return Matrix(computeLambda(values)); + } + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Matrix &S) const { + return Matrix(computeLambda(S)); + } + + /// Compute A matrix whose Eigenvalues we will examine + Sparse computeA(const Values &values) const; + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeA(const Matrix &S) const; + + /// Dense version of computeA for wrapper/testing + Matrix computeA_(const Values &values) const { + return Matrix(computeA(values)); + } + + /// Project to pxdN Stiefel manifold + static Matrix StiefelElementMatrix(const Values &values); + + /** + * Compute minimum eigenvalue for optimality check. 
+ * @param values: should be of type SOn + */ + double computeMinEigenValue(const Values &values, + Vector *minEigenVector = nullptr) const; + + /// Project pxdN Stiefel manifold matrix S to Rot3^N + Values roundSolutionS(const Matrix &S) const; + + /// Create a tangent direction xi with eigenvector segment v_i + static Vector MakeATangentVector(size_t p, const Vector &v, size_t i); + + /// Calculate the riemannian gradient of F(values) at values + Matrix riemannianGradient(size_t p, const Values &values) const; + + /** + * Lift up the dimension of values in type SO(p-1) with descent direction + * provided by minEigenVector and return new values in type SO(p) + */ + static Values LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector); + + /** + * Given some values at p-1, return new values at p, by doing a line search + * along the descent direction, computed from the minimum eigenvector at p-1. + * @param values should be of type SO(p-1) + * @param minEigenVector corresponding to minEigenValue at level p-1 + * @return values of type SO(p) + */ + Values + initializeWithDescent(size_t p, const Values &values, + const Vector &minEigenVector, double minEigenValue, + double gradienTolerance = 1e-2, + double preconditionedGradNormTolerance = 1e-4) const; + /// @} + /// @name Advanced API + /// @{ + + /** + * Build graph for SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + */ + NonlinearFactorGraph buildGraphAt(size_t p) const; + + /** + * Create initial Values of type SO(p) + * @param p the dimensionality of the rotation manifold + */ + Values initializeRandomlyAt(size_t p, std::mt19937 &rng) const; + + /// Version of initializeRandomlyAt with fixed random seed. 
+ Values initializeRandomlyAt(size_t p) const; + + /** + * Calculate cost for SO(p) + * Values should be of type SO(p) + */ + double costAt(size_t p, const Values &values) const; + + /** + * Given an estimated local minimum Yopt for the (possibly lifted) + * relaxation, this function computes and returns the block-diagonal elements + * of the corresponding Lagrange multiplier. + */ + Sparse computeLambda(const Values &values) const; + + /** + * Compute minimum eigenvalue for optimality check. + * @param values: should be of type SOn + * @return minEigenVector and minEigenValue + */ + std::pair computeMinEigenVector(const Values &values) const; + + /** + * Check optimality + * @param values: should be of type SOn + */ + bool checkOptimality(const Values &values) const; + + /** + * Try to create optimizer at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return lm optimizer + */ + boost::shared_ptr createOptimizerAt( + size_t p, const Values &initial) const; + + /** + * Try to optimize at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return SO(p) values + */ + Values tryOptimizingAt(size_t p, const Values &initial) const; + + /** + * Project from SO(p) to Rot2 or Rot3 + * Values should be of type SO(p) + */ + Values projectFrom(size_t p, const Values &values) const; + + /** + * Project from SO(p)^N to Rot2^N or Rot3^N + * Values should be of type SO(p) + */ + Values roundSolution(const Values &values) const; + + /// Lift Values of type T to SO(p) + template static Values LiftTo(size_t p, const Values &values) { + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SOn::Lift(p, it.value.matrix())); + } + return result; + } + + /// @} + /// @name Basic API + /// @{ + + /** + * Calculate cost for SO(3) + * Values should be of type Rot3 + */ + double cost(const Values &values) const; + + 
/** + * Initialize randomly at SO(d) + * @param rng random number generator + * Example: + * std::mt19937 rng(42); + * Values initial = initializeRandomly(rng, p); + */ + Values initializeRandomly(std::mt19937 &rng) const; + + /// Random initialization for wrapper, fixed random seed. + Values initializeRandomly() const; + + /** + * Optimize at different values of p until convergence. + * @param initial initial Rot3 values + * @param pMin value of p to start Riemanian staircase at (default: d). + * @param pMax maximum value of p to try (default: 10) + * @return (Rot3 values, minimum eigenvalue) + */ + std::pair run(const Values &initialEstimate, size_t pMin = d, + size_t pMax = 10) const; + /// @} +}; + +// Subclasses for d=2 and d=3 that explicitly instantiate, as well as provide a +// convenience interface with file access. + +class ShonanAveraging2 : public ShonanAveraging<2> { +public: + ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging2(string g2oFile, const Parameters ¶meters = Parameters()); +}; + +class ShonanAveraging3 : public ShonanAveraging<3> { +public: + ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging3(string g2oFile, const Parameters ¶meters = Parameters()); + + // TODO(frank): Deprecate after we land pybind wrapper + ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters = Parameters()); +}; +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.cpp b/gtsam/sfm/ShonanFactor.cpp new file mode 100644 index 000000000..b911fb5a4 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.cpp @@ -0,0 +1,141 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Various factors that minimize some Frobenius norm + */ + +#include + +#include +#include +#include + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +//****************************************************************************** +template +ShonanFactor::ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model, + const boost::shared_ptr &G) + : NoiseModelFactor2(ConvertNoiseModel(model, p * d), j1, j2), + M_(R12.matrix()), // d*d in all cases + p_(p), // 4 for SO(4) + pp_(p * p), // 16 for SO(4) + G_(G) { + if (noiseModel()->dim() != d * p_) + throw std::invalid_argument( + "ShonanFactor: model with incorrect dimension."); + if (!G) { + G_ = boost::make_shared(); + *G_ = SOn::VectorizedGenerators(p); // expensive! 
+  }
+  if (static_cast<size_t>(G_->rows()) != pp_ ||
+      static_cast<size_t>(G_->cols()) != SOn::Dimension(p))
+    throw std::invalid_argument("ShonanFactor: passed in generators "
+                                "of incorrect dimension.");
+}
+
+//******************************************************************************
+template <size_t d>
+void ShonanFactor<d>::print(const std::string &s,
+                            const KeyFormatter &keyFormatter) const {
+  std::cout << s << "ShonanFactor<" << p_ << ">(" << keyFormatter(key1()) << ","
+            << keyFormatter(key2()) << ")\n";
+  traits<Matrix>::Print(M_, "  M: ");
+  noiseModel_->print("  noise model: ");
+}
+
+//******************************************************************************
+template <size_t d>
+bool ShonanFactor<d>::equals(const NonlinearFactor &expected,
+                             double tol) const {
+  auto e = dynamic_cast<const ShonanFactor *>(&expected);
+  return e != nullptr && NoiseModelFactor2<SOn, SOn>::equals(*e, tol) &&
+         p_ == e->p_ && M_ == e->M_;
+}
+
+//******************************************************************************
+template <size_t d>
+void ShonanFactor<d>::fillJacobians(const Matrix &M1, const Matrix &M2,
+                                    boost::optional<Matrix &> H1,
+                                    boost::optional<Matrix &> H2) const {
+  gttic(ShonanFactor_Jacobians);
+  const size_t dim = p_ * d; // Stiefel manifold dimension
+
+  if (H1) {
+    // If asked, calculate Jacobian H1 as (M' \otimes M1) * G
+    // M' = dxd, M1 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p)
+    // (M' \otimes M1) is dim*dim, but last pp-dim columns are zero
+    *H1 = Matrix::Zero(dim, G_->cols());
+    for (size_t j = 0; j < d; j++) {
+      auto MG_j = M1 * G_->middleRows(j * p_, p_); // p_ * Dim(p)
+      for (size_t i = 0; i < d; i++) {
+        H1->middleRows(i * p_, p_) -= M_(j, i) * MG_j;
+      }
+    }
+  }
+  if (H2) {
+    // If asked, calculate Jacobian H2 as (I_d \otimes M2) * G
+    // I_d = dxd, M2 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p)
+    // (I_d \otimes M2) is dim*dim, but again last pp-dim columns are zero
+    H2->resize(dim, G_->cols());
+    for (size_t i = 0; i < d; i++) {
+      H2->middleRows(i * p_, p_) = M2 * G_->middleRows(i * p_, p_);
+    }
+  }
+}
+
+//******************************************************************************
+template <size_t d>
+Vector ShonanFactor<d>::evaluateError(const SOn &Q1, const SOn &Q2,
+                                      boost::optional<Matrix &> H1,
+                                      boost::optional<Matrix &> H2) const {
+  gttic(ShonanFactor_evaluateError);
+
+  const Matrix &M1 = Q1.matrix();
+  const Matrix &M2 = Q2.matrix();
+  if (M1.rows() != static_cast<int>(p_) || M2.rows() != static_cast<int>(p_))
+    throw std::invalid_argument("Invalid dimension SOn values passed to "
+                                "ShonanFactor::evaluateError");
+
+  const size_t dim = p_ * d; // Stiefel manifold dimension
+  Vector fQ2(dim), hQ1(dim);
+
+  // Vectorize and extract only d leftmost columns, i.e. vec(M2*P)
+  fQ2 << Eigen::Map<const Matrix>(M2.data(), dim, 1);
+
+  // Vectorize M1*P*R12
+  const Matrix Q1PR12 = M1.leftCols<d>() * M_;
+  hQ1 << Eigen::Map<const Matrix>(Q1PR12.data(), dim, 1);
+
+  this->fillJacobians(M1, M2, H1, H2);
+
+  return fQ2 - hQ1;
+}
+
+/* ************************************************************************* */
+// Explicit instantiation for d=2 and d=3
+template class ShonanFactor<2>;
+template class ShonanFactor<3>;
+
+//******************************************************************************
+
+} // namespace gtsam
diff --git a/gtsam/sfm/ShonanFactor.h b/gtsam/sfm/ShonanFactor.h
new file mode 100644
index 000000000..3c43c2c52
--- /dev/null
+++ b/gtsam/sfm/ShonanFactor.h
@@ -0,0 +1,91 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al.
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Main factor type in Shonan averaging, on SO(n) pairs + */ + +#pragma once + +#include +#include +#include +#include + +#include + +namespace gtsam { + +/** + * ShonanFactor is a BetweenFactor that moves in SO(p), but will + * land on the SO(d) sub-manifold of SO(p) at the global minimum. It projects + * the SO(p) matrices down to a Stiefel manifold of p*d matrices. + */ +template +class GTSAM_EXPORT ShonanFactor : public NoiseModelFactor2 { + Matrix M_; ///< measured rotation between R1 and R2 + size_t p_, pp_; ///< dimensionality constants + boost::shared_ptr G_; ///< matrix of vectorized generators + + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + +public: + /// @name Constructor + /// @{ + + /// Constructor. Note we convert to d*p-dimensional noise model. + /// To save memory and mallocs, pass in the vectorized Lie algebra generators: + /// G = boost::make_shared(SOn::VectorizedGenerators(p)); + ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model = nullptr, + const boost::shared_ptr &G = nullptr); + + /// @} + /// @name Testable + /// @{ + + /// print with optional string + void + print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; + + /// assert equality up to a tolerance + bool equals(const NonlinearFactor &expected, + double tol = 1e-9) const override; + + /// @} + /// @name NoiseModelFactor2 methods + /// @{ + + /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] + /// projects down from SO(p) to the Stiefel manifold of px3 matrices. 
+ Vector + evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override; + /// @} + +private: + /// Calculate Jacobians if asked, Only implemented for d=2 and 3 in .cpp + void fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const; +}; + +// Explicit instantiation for d=2 and d=3 in .cpp file: +using ShonanFactor2 = ShonanFactor<2>; +using ShonanFactor3 = ShonanFactor<3>; + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanGaugeFactor.h b/gtsam/sfm/ShonanGaugeFactor.h new file mode 100644 index 000000000..4847c5d58 --- /dev/null +++ b/gtsam/sfm/ShonanGaugeFactor.h @@ -0,0 +1,108 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanGaugeFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Factor used in Shonan Averaging to clamp down gauge freedom + */ + +#pragma once + +#include +#include +#include + +#include + +namespace gtsam { +/** + * The ShonanGaugeFactor creates a constraint on a single SO(n) to avoid moving + * in the stabilizer. + * + * Details: SO(p) contains the p*3 Stiefel matrices of orthogonal frames: we + * take those to be the 3 columns on the left. + * The P*P skew-symmetric matrices associated with so(p) can be partitioned as + * (Appendix B in the ECCV paper): + * | [w] -K' | + * | K [g] | + * where w is the SO(3) space, K are the extra Stiefel diemnsions (wormhole!) + * and (g)amma are extra dimensions in SO(p) that do not modify the cost + * function. 
The latter corresponds to rotations SO(p-d), and so the first few + * values of p-d for d==3 with their corresponding dimensionality are {0:0, 1:0, + * 2:1, 3:3, ...} + * + * The ShonanGaugeFactor adds a unit Jacobian to these extra dimensions, + * essentially restricting the optimization to the Stiefel manifold. + */ +class GTSAM_EXPORT ShonanGaugeFactor : public NonlinearFactor { + // Row dimension, equal to the dimensionality of SO(p-d) + size_t rows_; + + /// Constant Jacobian + boost::shared_ptr whitenedJacobian_; + +public: + /** + * Construct from key for an SO(p) matrix, for base dimension d (2 or 3) + * If parameter gamma is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(gamma). + */ + ShonanGaugeFactor(Key key, size_t p, size_t d = 3, + boost::optional gamma = boost::none) + : NonlinearFactor(boost::assign::cref_list_of<1>(key)) { + if (p < d) { + throw std::invalid_argument("ShonanGaugeFactor must have p>=d."); + } + // Calculate dimensions + size_t q = p - d; + size_t P = SOn::Dimension(p); // dimensionality of SO(p) + rows_ = SOn::Dimension(q); // dimensionality of SO(q), the gauge + + // Create constant Jacobian as a rows_*P matrix: there are rows_ penalized + // dimensions, but it is a bit tricky to find them among the P columns. + // The key is to look at how skew-symmetric matrices are laid out in SOn.h: + // the first tangent dimension will always be included, but beyond that we + // have to be careful. We always need to skip the d top-rows of the skew- + // symmetric matrix as they below to K, part of the Stiefel manifold. + Matrix A(rows_, P); + A.setZero(); + double invSigma = gamma ? 
std::sqrt(*gamma) : 1.0; + size_t i = 0, j = 0, n = p - 1 - d; + while (i < rows_) { + A.block(i, j, n, n) = invSigma * Matrix::Identity(n, n); + i += n; + j += n + d; // skip d columns + n -= 1; + } + // TODO(frank): assign the right one in the right columns + whitenedJacobian_ = + boost::make_shared(key, A, Vector::Zero(rows_)); + } + + /// Destructor + virtual ~ShonanGaugeFactor() {} + + /// Calculate the error of the factor: always zero + double error(const Values &c) const override { return 0; } + + /// get the dimension of the factor (number of rows on linearization) + size_t dim() const override { return rows_; } + + /// linearize to a GaussianFactor + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; + } +}; +// \ShonanGaugeFactor + +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sfm/TranslationFactor.h b/gtsam/sfm/TranslationFactor.h index d63633d7e..d1f85cf01 100644 --- a/gtsam/sfm/TranslationFactor.h +++ b/gtsam/sfm/TranslationFactor.h @@ -36,6 +36,8 @@ namespace gtsam { * normalized(Tb - Ta) - w_aZb.point3() * * @addtogroup SFM + * + * */ class TranslationFactor : public NoiseModelFactor2 { private: diff --git a/gtsam/sfm/TranslationRecovery.cpp b/gtsam/sfm/TranslationRecovery.cpp index aeeae688f..665336cc4 100644 --- a/gtsam/sfm/TranslationRecovery.cpp +++ b/gtsam/sfm/TranslationRecovery.cpp @@ -10,10 +10,10 @@ * -------------------------------------------------------------------------- */ /** - * @file TranslationRecovery.h + * @file TranslationRecovery.cpp * @author Frank Dellaert * @date March 2020 - * @brief test recovering translations when rotations are given. 
+ * @brief Source code for recovering translations when rotations are given */ #include @@ -33,30 +33,25 @@ using namespace gtsam; using namespace std; NonlinearFactorGraph TranslationRecovery::buildGraph() const { - auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01); NonlinearFactorGraph graph; // Add all relative translation edges for (auto edge : relativeTranslations_) { - Key a, b; - tie(a, b) = edge.first; - const Unit3 w_aZb = edge.second; - graph.emplace_shared(a, b, w_aZb, noiseModel); + graph.emplace_shared(edge.key1(), edge.key2(), + edge.measured(), edge.noiseModel()); } return graph; } void TranslationRecovery::addPrior(const double scale, - NonlinearFactorGraph* graph) const { - auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01); + NonlinearFactorGraph *graph) const { + //TODO(akshay-krishnan): make this an input argument + auto priorNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); auto edge = relativeTranslations_.begin(); - Key a, b; - tie(a, b) = edge->first; - const Unit3 w_aZb = edge->second; - graph->emplace_shared >(a, Point3(0, 0, 0), noiseModel); - graph->emplace_shared >(b, scale * w_aZb.point3(), - noiseModel); + graph->emplace_shared >(edge->key1(), Point3(0, 0, 0), priorNoiseModel); + graph->emplace_shared >(edge->key2(), scale * edge->measured().point3(), + edge->noiseModel()); } Values TranslationRecovery::initalizeRandomly() const { @@ -71,10 +66,8 @@ Values TranslationRecovery::initalizeRandomly() const { // Loop over measurements and add a random translation for (auto edge : relativeTranslations_) { - Key a, b; - tie(a, b) = edge.first; - insert(a); - insert(b); + insert(edge.key1()); + insert(edge.key2()); } return initial; } @@ -89,7 +82,8 @@ Values TranslationRecovery::run(const double scale) const { } TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( - const Values& poses, const vector& edges) { + const Values &poses, const vector &edges) { + auto edgeNoiseModel = 
noiseModel::Isotropic::Sigma(3, 0.01); TranslationEdges relativeTranslations; for (auto edge : edges) { Key a, b; @@ -97,7 +91,7 @@ TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( const Pose3 wTa = poses.at(a), wTb = poses.at(b); const Point3 Ta = wTa.translation(), Tb = wTb.translation(); const Unit3 w_aZb(Tb - Ta); - relativeTranslations[edge] = w_aZb; + relativeTranslations.emplace_back(a, b, w_aZb, edgeNoiseModel); } return relativeTranslations; } diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h index bb3c3cdb1..4c6a95cbe 100644 --- a/gtsam/sfm/TranslationRecovery.h +++ b/gtsam/sfm/TranslationRecovery.h @@ -13,15 +13,16 @@ * @file TranslationRecovery.h * @author Frank Dellaert * @date March 2020 - * @brief test recovering translations when rotations are given. + * @brief Recovering translations in an epipolar graph when rotations are given. */ #include #include #include +#include -#include #include +#include namespace gtsam { @@ -48,7 +49,7 @@ namespace gtsam { class TranslationRecovery { public: using KeyPair = std::pair; - using TranslationEdges = std::map; + using TranslationEdges = std::vector>; private: TranslationEdges relativeTranslations_; @@ -59,13 +60,14 @@ class TranslationRecovery { * @brief Construct a new Translation Recovery object * * @param relativeTranslations the relative translations, in world coordinate - * frames, indexed in a map by a pair of Pose keys. + * frames, vector of BinaryMeasurements of Unit3, where each key of a measurement + * is a point in 3D. * @param lmParams (optional) gtsam::LavenbergMarquardtParams that can be * used to modify the parameters for the LM optimizer. By default, uses the * default LM parameters. 
*/ - TranslationRecovery(const TranslationEdges& relativeTranslations, - const LevenbergMarquardtParams& lmParams = LevenbergMarquardtParams()) + TranslationRecovery(const TranslationEdges &relativeTranslations, + const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams()) : relativeTranslations_(relativeTranslations), params_(lmParams) { params_.setVerbosityLM("Summary"); } @@ -83,7 +85,7 @@ class TranslationRecovery { * @param scale scale for first relative translation which fixes gauge. * @param graph factor graph to which prior is added. */ - void addPrior(const double scale, NonlinearFactorGraph* graph) const; + void addPrior(const double scale, NonlinearFactorGraph *graph) const; /** * @brief Create random initial translations. @@ -105,10 +107,11 @@ class TranslationRecovery { * * @param poses SE(3) ground truth poses stored as Values * @param edges pairs (a,b) for which a measurement w_aZb will be generated. - * @return TranslationEdges map from a KeyPair to the simulated Unit3 - * translation direction measurement between the cameras in KeyPair. + * @return TranslationEdges vector of binary measurements where the keys are + * the cameras and the measurement is the simulated Unit3 translation + * direction between the cameras. */ static TranslationEdges SimulateMeasurements( - const Values& poses, const std::vector& edges); + const Values &poses, const std::vector &edges); }; } // namespace gtsam diff --git a/gtsam/sfm/tests/testBinaryMeasurement.cpp b/gtsam/sfm/tests/testBinaryMeasurement.cpp new file mode 100644 index 000000000..3dd81c2c1 --- /dev/null +++ b/gtsam/sfm/tests/testBinaryMeasurement.cpp @@ -0,0 +1,70 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testBinaryMeasurement.cpp + * @brief Unit tests for BinaryMeasurement class + * @author Akshay Krishnan + * @date July 2020 + */ + +#include + +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +static const Key kKey1(2), kKey2(1); + +// Create noise models for unit3 and rot3 +static SharedNoiseModel unit3_model(noiseModel::Isotropic::Sigma(2, 0.05)); +static SharedNoiseModel rot3_model(noiseModel::Isotropic::Sigma(3, 0.05)); + +const Unit3 unit3Measured(Vector3(1, 1, 1)); +const Rot3 rot3Measured; + +TEST(BinaryMeasurement, Unit3) { + BinaryMeasurement unit3Measurement(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT_LONGS_EQUAL(unit3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(unit3Measurement.key2(), kKey2); + EXPECT(unit3Measurement.measured().equals(unit3Measured)); + + BinaryMeasurement unit3MeasurementCopy(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT(unit3Measurement.equals(unit3MeasurementCopy)); +} + +TEST(BinaryMeasurement, Rot3) { + // testing the accessors + BinaryMeasurement rot3Measurement(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT_LONGS_EQUAL(rot3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(rot3Measurement.key2(), kKey2); + EXPECT(rot3Measurement.measured().equals(rot3Measured)); + + // testing the equals function + BinaryMeasurement rot3MeasurementCopy(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT(rot3Measurement.equals(rot3MeasurementCopy)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanAveraging.cpp b/gtsam/sfm/tests/testShonanAveraging.cpp new file mode 
100644 index 000000000..bc1449747 --- /dev/null +++ b/gtsam/sfm/tests/testShonanAveraging.cpp @@ -0,0 +1,360 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for Shonan Averaging algorithm + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +template +using Rot = typename std::conditional::type; + +template +using Pose = typename std::conditional::type; + +ShonanAveraging3 fromExampleName( + const std::string &name, + ShonanAveraging3::Parameters parameters = ShonanAveraging3::Parameters()) { + string g2oFile = findExampleDataFile(name); + return ShonanAveraging3(g2oFile, parameters); +} + +static const ShonanAveraging3 kShonan = fromExampleName("toyExample.g2o"); + +static std::mt19937 kRandomNumberGenerator(42); + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkConstructor) { + EXPECT_LONGS_EQUAL(5, kShonan.nrUnknowns()); + + EXPECT_LONGS_EQUAL(15, kShonan.D().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.D().cols()); + auto D = kShonan.denseD(); + EXPECT_LONGS_EQUAL(15, D.rows()); + EXPECT_LONGS_EQUAL(15, D.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.Q().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.Q().cols()); + auto Q = kShonan.denseQ(); + EXPECT_LONGS_EQUAL(15, Q.rows()); + EXPECT_LONGS_EQUAL(15, Q.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.L().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.L().cols()); + auto L = kShonan.denseL(); + EXPECT_LONGS_EQUAL(15, 
L.rows()); + EXPECT_LONGS_EQUAL(15, L.cols()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, buildGraphAt) { + auto graph = kShonan.buildGraphAt(5); + EXPECT_LONGS_EQUAL(7, graph.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkOptimality) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! + auto Lambda = kShonan.computeLambda(random); + EXPECT_LONGS_EQUAL(15, Lambda.rows()); + EXPECT_LONGS_EQUAL(15, Lambda.cols()); + EXPECT_LONGS_EQUAL(45, Lambda.nonZeros()); + auto lambdaMin = kShonan.computeMinEigenValue(random); + // EXPECT_DOUBLES_EQUAL(-5.2964625490657866, lambdaMin, + // 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(-414.87376657555996, lambdaMin, + 1e-4); // Regression test + EXPECT(!kShonan.checkOptimality(random)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt3) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values initial = ShonanAveraging3::LiftTo(3, randomRotations); // convert to SOn + EXPECT(!kShonan.checkOptimality(initial)); + const Values result = kShonan.tryOptimizingAt(3, initial); + EXPECT(kShonan.checkOptimality(result)); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(3, result), 1e-4); + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt4) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = 
ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! + const Values result = kShonan.tryOptimizingAt(4, random); + EXPECT(kShonan.checkOptimality(result)); + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(4, result), 1e-3); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, MakeATangentVector) { + Vector9 v; + v << 1, 2, 3, 4, 5, 6, 7, 8, 9; + Matrix expected(5, 5); + expected << 0, 0, 0, 0, -4, // + 0, 0, 0, 0, -5, // + 0, 0, 0, 0, -6, // + 0, 0, 0, 0, 0, // + 4, 5, 6, 0, 0; + const Vector xi_1 = ShonanAveraging3::MakeATangentVector(5, v, 1); + const auto actual = SOn::Hat(xi_1); + CHECK(assert_equal(expected, actual)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, LiftTo) { + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + Values lifted = ShonanAveraging3::LiftTo(5, initial); + EXPECT(assert_equal(SOn(5), lifted.at(0))); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, CheckWithEigen) { + // control randomness + static std::mt19937 rng(0); + Vector descentDirection = Vector::Random(15); // for use below + const Values randomRotations = kShonan.initializeRandomly(rng); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + + // Optimize + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + + // Compute Eigenvalue with Spectra solver + double lambda = kShonan.computeMinEigenValue(Qstar3); + + // Check Eigenvalue with slow Eigen version, converts matrix A to dense matrix! 
+ const Matrix S = ShonanAveraging3::StiefelElementMatrix(Qstar3); + auto A = kShonan.computeA(S); + bool computeEigenvectors = false; + Eigen::EigenSolver eigenSolver(Matrix(A), computeEigenvectors); + auto lambdas = eigenSolver.eigenvalues().real(); + double minEigenValue = lambdas(0); + for (int i = 1; i < lambdas.size(); i++) + minEigenValue = min(lambdas(i), minEigenValue); + + // Actual check + EXPECT_DOUBLES_EQUAL(minEigenValue, lambda, 1e-12); + + // Construct test descent direction (as minEigenVector is not predictable + // across platforms, being one from a basically flat 3d- subspace) + + // Check descent + Values initialQ4 = + ShonanAveraging3::LiftwithDescent(4, Qstar3, descentDirection); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); + + // TODO(frank): uncomment this regression test: currently not repeatable + // across platforms. + // Matrix expected(4, 4); + // expected << 0.0459224, -0.688689, -0.216922, 0.690321, // + // 0.92381, 0.191931, 0.255854, 0.21042, // + // -0.376669, 0.301589, 0.687953, 0.542111, // + // -0.0508588, 0.630804, -0.643587, 0.43046; + // EXPECT(assert_equal(SOn(expected), initialQ4.at(0), 1e-5)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, initializeWithDescent) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + Vector minEigenVector; + double lambdaMin = kShonan.computeMinEigenValue(Qstar3, &minEigenVector); + Values initialQ4 = + kShonan.initializeWithDescent(4, Qstar3, minEigenVector, lambdaMin); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, run) { + auto initial = kShonan.initializeRandomly(kRandomNumberGenerator); + auto result = kShonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, 
kShonan.cost(result.first), 1e-3); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, result.second, + 1e-4); // Regression test +} + +/* ************************************************************************* */ +namespace klaus { +// The data in the file is the Colmap solution +const Rot3 wR0(0.9992281076190063, -0.02676080288219576, -0.024497002638379624, + -0.015064701622500615); +const Rot3 wR1(0.998239108728862, -0.049543805396343954, -0.03232420352077356, + -0.004386230477751116); +const Rot3 wR2(0.9925378735259738, -0.07993768981394891, 0.0825062894866454, + -0.04088089479075661); +} // namespace klaus + +TEST(ShonanAveraging3, runKlaus) { + using namespace klaus; + + // Initialize a Shonan instance without the Karcher mean + ShonanAveraging3::Parameters parameters; + parameters.setKarcherWeight(0); + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = + fromExampleName("Klaus3.g2o", parameters); + + // Check nr poses + EXPECT_LONGS_EQUAL(3, shonan.nrUnknowns()); + + // Colmap uses the Y-down vision frame, and the first 3 rotations are close to + // identity. We check that below. Note tolerance is quite high. + static const Rot3 identity; + EXPECT(assert_equal(identity, wR0, 0.2)); + EXPECT(assert_equal(identity, wR1, 0.2)); + EXPECT(assert_equal(identity, wR2, 0.2)); + + // Get measurements + const Rot3 R01 = shonan.measured(0); + const Rot3 R12 = shonan.measured(1); + const Rot3 R02 = shonan.measured(2); + + // Regression test to make sure data did not change. + EXPECT(assert_equal(Rot3(0.9995433591728293, -0.022048798853273946, + -0.01796327847857683, 0.010210006313668573), + R01)); + + // Check Colmap solution agrees OK with relative rotation measurements. 
+ EXPECT(assert_equal(R01, wR0.between(wR1), 0.1)); + EXPECT(assert_equal(R12, wR1.between(wR2), 0.1)); + EXPECT(assert_equal(R02, wR0.between(wR2), 0.1)); + + // Run Shonan (with prior on first rotation) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-9.2259161494467889e-05, result.second, + 1e-4); // Regression + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + // rR0 = rRw * wR0 => rRw = rR0 * wR0.inverse() + // rR1 = rRw * wR1 + // rR2 = rRw * wR2 + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, runKlausKarcher) { + using namespace klaus; + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o"); + + // Run Shonan (with Karcher mean prior) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-1.361402670507772e-05, result.second, + 1e-4); // Regression test + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging2, noisyToyGraph) { + // Load 2D toy example + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + // lmParams.setVerbosityLM("SUMMARY"); + 
string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + ShonanAveraging2::Parameters parameters(lmParams); + auto measurements = parseMeasurements(g2oFile); + ShonanAveraging2 shonan(measurements, parameters); + EXPECT_LONGS_EQUAL(4, shonan.nrUnknowns()); + + // Check graph building + NonlinearFactorGraph graph = shonan.buildGraphAt(2); + EXPECT_LONGS_EQUAL(6, graph.size()); + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 2); + EXPECT_DOUBLES_EQUAL(0.0008211, shonan.cost(result.first), 1e-6); + EXPECT_DOUBLES_EQUAL(0, result.second, 1e-10); // certificate! +} + +/* ************************************************************************* */ +// Test alpha/beta/gamma prior weighting. +TEST(ShonanAveraging3, PriorWeights) { + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + ShonanAveraging3::Parameters params(lmParams); + EXPECT_DOUBLES_EQUAL(0, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(1, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(0, params.gamma, 1e-9); + double alpha = 100.0, beta = 200.0, gamma = 300.0; + params.setAnchorWeight(alpha); + params.setKarcherWeight(beta); + params.setGaugesWeight(gamma); + EXPECT_DOUBLES_EQUAL(alpha, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(beta, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(gamma, params.gamma, 1e-9); + params.setKarcherWeight(0); + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o", params); + for (auto i : {0,1,2}) { + const auto& m = shonan.measurement(i); + auto isotropic = + boost::static_pointer_cast(m.noiseModel()); + CHECK(isotropic != nullptr); + EXPECT_LONGS_EQUAL(3, isotropic->dim()); + EXPECT_DOUBLES_EQUAL(0.2, isotropic->sigma(), 1e-9); + } + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + EXPECT_DOUBLES_EQUAL(3.0756, shonan.cost(initial), 1e-4); + auto result = shonan.run(initial, 3, 3); + EXPECT_DOUBLES_EQUAL(0.0015, shonan.cost(result.first), 1e-4); +} +/* 
************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanFactor.cpp b/gtsam/sfm/tests/testShonanFactor.cpp new file mode 100644 index 000000000..ef94c5cf4 --- /dev/null +++ b/gtsam/sfm/tests/testShonanFactor.cpp @@ -0,0 +1,121 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * testFrobeniusFactor.cpp + * + * @file testFrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Check evaluateError for various Frobenius norm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +//****************************************************************************** +namespace so3 { +SO3 id; +Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1); +Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace so3 + +//****************************************************************************** +namespace submanifold { +SO4 id; +Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1.tail<3>()); +SO4 Q1 = SO4::Expmap(v1); +Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2.tail<3>()); +SO4 Q2 = SO4::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace submanifold + +/* 
************************************************************************* */ +TEST(ShonanFactor3, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(3, 1.2); + for (const size_t p : {5, 4, 3}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(3, 3) = submanifold::R1.matrix(); + SOn Q1(M); + M.topLeftCorner(3, 3) = submanifold::R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor3(1, 2, Rot3(::so3::R12.matrix()), p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +TEST(ShonanFactor3, equivalenceToSO3) { + using namespace ::submanifold; + auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); + auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension + auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); + auto factor4 = ShonanFactor3(1, 2, Rot3(R12.matrix()), 4, model); + const Matrix3 E3(factor3.evaluateError(R1, R2).data()); + const Matrix43 E4( + factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); + EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); + EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); +} + +/* ************************************************************************* */ +TEST(ShonanFactor2, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(1, 1.2); + const Rot2 R1(0.3), R2(0.5), R12(0.2); + for (const size_t p : {5, 4, 3, 2}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(2, 2) = R1.matrix(); + SOn Q1(M); + M.topLeftCorner(2, 2) = R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor2(1, 2, R12, p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, 
values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanGaugeFactor.cpp b/gtsam/sfm/tests/testShonanGaugeFactor.cpp new file mode 100644 index 000000000..344394b9c --- /dev/null +++ b/gtsam/sfm/tests/testShonanGaugeFactor.cpp @@ -0,0 +1,106 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanGaugeFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for ShonanGaugeFactor class + */ + +#include + +#include + +#include +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +// Check dimensions of all low-dim GaugeFactors +TEST(ShonanAveraging, GaugeFactorLows) { + constexpr Key key(123); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 2, 2).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 2).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 4, 2).dim()); // SO(4-2) -> 1 + EXPECT_LONGS_EQUAL(3, ShonanGaugeFactor(key, 5, 2).dim()); // SO(5-2) -> 3 + + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 3).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 4, 3).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 5, 3).dim()); // SO(5-3) -> 1 +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6) +TEST(ShonanAveraging, GaugeFactorSO6) { + constexpr Key 
key(666); + ShonanGaugeFactor factor(key, 6); // For SO(6) + Matrix A = Matrix::Zero(3, 15); // SO(6-3) = SO(3) == 3-dimensional gauge + A(0, 0) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 1; // then we skip 3 tangent dimensions + A(2, 5) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(3)); + Values values; + EXPECT_LONGS_EQUAL(3, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(7) +TEST(ShonanAveraging, GaugeFactorSO7) { + constexpr Key key(777); + ShonanGaugeFactor factor(key, 7); // For SO(7) + Matrix A = Matrix::Zero(6, 21); // SO(7-3) = SO(4) == 6-dimensional gauge + A(0, 0) = 1; // first 3 of 7^th skew column, which has 6 non-zero entries + A(1, 1) = 1; + A(2, 2) = 1; // then we skip 3 tangent dimensions + A(3, 6) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(4, 7) = 1; // then we skip 3 tangent dimensions + A(5, 11) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. 
+ JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6), with base SO(2) +TEST(ShonanAveraging, GaugeFactorSO6over2) { + constexpr Key key(602); + double gamma = 4; + ShonanGaugeFactor factor(key, 6, 2, gamma); // For SO(6), base SO(2) + Matrix A = Matrix::Zero(6, 15); // SO(6-2) = SO(4) == 6-dimensional gauge + A(0, 0) = 2; // first 3 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 2; + A(2, 2) = 2; // then we skip only 2 tangent dimensions + A(3, 5) = 2; // first 2 of 5^th skew column, which has 4 non-zero entries + A(4, 6) = 2; // then we skip only 2 tangent dimensions + A(5, 9) = 2; // first of 4th skew colum, which has 3 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testTranslationFactor.cpp b/gtsam/sfm/tests/testTranslationFactor.cpp index 37e8b6c0f..3ff76ac5c 100644 --- a/gtsam/sfm/tests/testTranslationFactor.cpp +++ b/gtsam/sfm/tests/testTranslationFactor.cpp @@ -51,8 +51,6 @@ TEST(TranslationFactor, ZeroError) { // Verify we get the expected error Vector expected = (Vector3() << 0, 0, 0).finished(); EXPECT(assert_equal(expected, actualError, 1e-9)); - - } /* ************************************************************************* */ @@ -67,13 +65,13 @@ TEST(TranslationFactor, NonZeroError) { Vector 
actualError(factor.evaluateError(T1, T2)); // verify we get the expected error - Vector expected = (Vector3() << -1, 1/sqrt(2), 1/sqrt(2)).finished(); + Vector expected = (Vector3() << -1, 1 / sqrt(2), 1 / sqrt(2)).finished(); EXPECT(assert_equal(expected, actualError, 1e-9)); } /* ************************************************************************* */ -Vector factorError(const Point3& T1, const Point3& T2, - const TranslationFactor& factor) { +Vector factorError(const Point3 &T1, const Point3 &T2, + const TranslationFactor &factor) { return factor.evaluateError(T1, T2); } diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index 9afc2f72b..a594e95d5 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -21,6 +21,14 @@ #include #include +#ifdef _WIN32 +#define BETWEENFACTOR_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define BETWEENFACTOR_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp index 6981a6a1c..5697a0cd6 100644 --- a/gtsam/slam/FrobeniusFactor.cpp +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -18,117 +18,38 @@ #include -#include -#include -#include - -#include -#include -#include - using namespace std; namespace gtsam { //****************************************************************************** -boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit) { +boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t d, bool defaultToUnit) { double sigma = 1.0; if (model != nullptr) { - if (model->dim() != 6) { - if (!defaultToUnit) - throw std::runtime_error("Can only convert Pose3 noise models"); - } else { - auto sigmas = model->sigmas().head(3).eval(); - if (sigmas(1) != sigmas(0) || sigmas(2) != sigmas(0)) { - if (!defaultToUnit) + auto 
sigmas = model->sigmas(); + size_t n = sigmas.size(); + if (n == 1) { + sigma = sigmas(0); // Rot2 + goto exit; + } + if (n == 3 || n == 6) { + sigma = sigmas(2); // Pose2, Rot3, or Pose3 + if (sigmas(0) != sigma || sigmas(1) != sigma) { + if (!defaultToUnit) { throw std::runtime_error("Can only convert isotropic rotation noise"); - } else { - sigma = sigmas(0); + } } + goto exit; + } + if (!defaultToUnit) { + throw std::runtime_error("Can only convert Pose2/Pose3 noise models"); } } +exit: return noiseModel::Isotropic::Sigma(d, sigma); } //****************************************************************************** -FrobeniusWormholeFactor::FrobeniusWormholeFactor( - Key j1, Key j2, const Rot3 &R12, size_t p, const SharedNoiseModel &model, - const boost::shared_ptr &G) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, p * 3), j1, j2), - M_(R12.matrix()), // 3*3 in all cases - p_(p), // 4 for SO(4) - pp_(p * p), // 16 for SO(4) - G_(G) { - if (noiseModel()->dim() != 3 * p_) - throw std::invalid_argument( - "FrobeniusWormholeFactor: model with incorrect dimension."); - if (!G) { - G_ = boost::make_shared(); - *G_ = SOn::VectorizedGenerators(p); // expensive! 
- } - if (G_->rows() != pp_ || G_->cols() != SOn::Dimension(p)) - throw std::invalid_argument("FrobeniusWormholeFactor: passed in generators " - "of incorrect dimension."); -} -//****************************************************************************** -void FrobeniusWormholeFactor::print(const std::string &s, const KeyFormatter &keyFormatter) const { - std::cout << s << "FrobeniusWormholeFactor<" << p_ << ">(" << keyFormatter(key1()) << "," - << keyFormatter(key2()) << ")\n"; - traits::Print(M_, " M: "); - noiseModel_->print(" noise model: "); -} - -//****************************************************************************** -bool FrobeniusWormholeFactor::equals(const NonlinearFactor &expected, - double tol) const { - auto e = dynamic_cast(&expected); - return e != nullptr && NoiseModelFactor2::equals(*e, tol) && - p_ == e->p_ && M_ == e->M_; -} - -//****************************************************************************** -Vector FrobeniusWormholeFactor::evaluateError( - const SOn& Q1, const SOn& Q2, boost::optional H1, - boost::optional H2) const { - gttic(FrobeniusWormholeFactorP_evaluateError); - - const Matrix& M1 = Q1.matrix(); - const Matrix& M2 = Q2.matrix(); - assert(M1.rows() == p_ && M2.rows() == p_); - - const size_t dim = 3 * p_; // Stiefel manifold dimension - Vector fQ2(dim), hQ1(dim); - - // Vectorize and extract only d leftmost columns, i.e. 
vec(M2*P) - fQ2 << Eigen::Map(M2.data(), dim, 1); - - // Vectorize M1*P*R12 - const Matrix Q1PR12 = M1.leftCols<3>() * M_; - hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); - - // If asked, calculate Jacobian as (M \otimes M1) * G - if (H1) { - const size_t p2 = 2 * p_; - Matrix RPxQ = Matrix::Zero(dim, pp_); - RPxQ.block(0, 0, p_, dim) << M1 * M_(0, 0), M1 * M_(1, 0), M1 * M_(2, 0); - RPxQ.block(p_, 0, p_, dim) << M1 * M_(0, 1), M1 * M_(1, 1), M1 * M_(2, 1); - RPxQ.block(p2, 0, p_, dim) << M1 * M_(0, 2), M1 * M_(1, 2), M1 * M_(2, 2); - *H1 = -RPxQ * (*G_); - } - if (H2) { - const size_t p2 = 2 * p_; - Matrix PxQ = Matrix::Zero(dim, pp_); - PxQ.block(0, 0, p_, p_) = M2; - PxQ.block(p_, p_, p_, p_) = M2; - PxQ.block(p2, p2, p_, p_) = M2; - *H2 = PxQ * (*G_); - } - - return fQ2 - hQ1; -} - -//****************************************************************************** - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/FrobeniusFactor.h b/gtsam/slam/FrobeniusFactor.h index f254b4b81..1fc37c785 100644 --- a/gtsam/slam/FrobeniusFactor.h +++ b/gtsam/slam/FrobeniusFactor.h @@ -18,6 +18,7 @@ #pragma once +#include #include #include #include @@ -25,23 +26,24 @@ namespace gtsam { /** - * When creating (any) FrobeniusFactor we convert a 6-dimensional Pose3 - * BetweenFactor noise model into an 9 or 16-dimensional isotropic noise + * When creating (any) FrobeniusFactor we can convert a Rot/Pose + * BetweenFactor noise model into a n-dimensional isotropic noise * model used to weight the Frobenius norm. If the noise model passed is - * null we return a Dim-dimensional isotropic noise model with sigma=1.0. If - * not, we we check if the 3-dimensional noise model on rotations is - * isotropic. If it is, we extend to 'Dim' dimensions, otherwise we throw an + * null we return a n-dimensional isotropic noise model with sigma=1.0. If + * not, we we check if the d-dimensional noise model on rotations is + * isotropic. 
If it is, we extend to 'n' dimensions, otherwise we throw an * error. If defaultToUnit == false throws an exception on unexepcted input. */ - GTSAM_EXPORT boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit = true); +GTSAM_EXPORT boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t n, + bool defaultToUnit = true); /** * FrobeniusPrior calculates the Frobenius norm between a given matrix and an * element of SO(3) or SO(4). */ template -class FrobeniusPrior : public NoiseModelFactor1 { +class GTSAM_EXPORT FrobeniusPrior : public NoiseModelFactor1 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; using MatrixNN = typename Rot::MatrixNN; Eigen::Matrix vecM_; ///< vectorized matrix to approximate @@ -50,7 +52,7 @@ class FrobeniusPrior : public NoiseModelFactor1 { /// Constructor FrobeniusPrior(Key j, const MatrixNN& M, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor1(ConvertPose3NoiseModel(model, Dim), j) { + : NoiseModelFactor1(ConvertNoiseModel(model, Dim), j) { vecM_ << Eigen::Map(M.data(), Dim, 1); } @@ -66,13 +68,13 @@ class FrobeniusPrior : public NoiseModelFactor1 { * The template argument can be any fixed-size SO. */ template -class FrobeniusFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT FrobeniusFactor : public NoiseModelFactor2 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; public: /// Constructor FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, Dim), j1, + : NoiseModelFactor2(ConvertNoiseModel(model, Dim), j1, j2) {} /// Error is just Frobenius norm between rotation matrices. @@ -92,7 +94,7 @@ class FrobeniusFactor : public NoiseModelFactor2 { * and in fact only SO3 and SO4 really work, as we need SO::AdjointMap. 
*/ template -GTSAM_EXPORT class FrobeniusBetweenFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { Rot R12_; ///< measured rotation between R1 and R2 Eigen::Matrix R2hat_H_R1_; ///< fixed derivative of R2hat wrpt R1 @@ -106,7 +108,7 @@ GTSAM_EXPORT class FrobeniusBetweenFactor : public NoiseModelFactor2 { FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12, const SharedNoiseModel& model = nullptr) : NoiseModelFactor2( - ConvertPose3NoiseModel(model, Dim), j1, j2), + ConvertNoiseModel(model, Dim), j1, j2), R12_(R12), R2hat_H_R1_(R12.inverse().AdjointMap()) {} @@ -150,52 +152,4 @@ GTSAM_EXPORT class FrobeniusBetweenFactor : public NoiseModelFactor2 { /// @} }; -/** - * FrobeniusWormholeFactor is a BetweenFactor that moves in SO(p), but will - * land on the SO(3) sub-manifold of SO(p) at the global minimum. It projects - * the SO(p) matrices down to a Stiefel manifold of p*d matrices. - * TODO(frank): template on D=2 or 3 - */ -class GTSAM_EXPORT FrobeniusWormholeFactor - : public NoiseModelFactor2 { - Matrix M_; ///< measured rotation between R1 and R2 - size_t p_, pp_; ///< dimensionality constants - boost::shared_ptr G_; ///< matrix of vectorized generators - -public: - /// @name Constructor - /// @{ - - /// Constructor. Note we convert to 3*p-dimensional noise model. 
- /// To save memory and mallocs, pass in the vectorized Lie algebra generators: - /// G = boost::make_shared(SOn::VectorizedGenerators(p)); - FrobeniusWormholeFactor(Key j1, Key j2, const Rot3 &R12, size_t p = 4, - const SharedNoiseModel &model = nullptr, - const boost::shared_ptr &G = nullptr); - - /// @} - /// @name Testable - /// @{ - - /// print with optional string - void - print(const std::string &s, - const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; - - /// assert equality up to a tolerance - bool equals(const NonlinearFactor &expected, - double tol = 1e-9) const override; - - /// @} - /// @name NoiseModelFactor2 methods - /// @{ - - /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] - /// projects down from SO(p) to the Stiefel manifold of px3 matrices. - Vector evaluateError(const SOn& Q1, const SOn& Q2, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const override; - /// @} -}; - } // namespace gtsam diff --git a/gtsam/slam/InitializePose.h b/gtsam/slam/InitializePose.h new file mode 100644 index 000000000..be249b0b5 --- /dev/null +++ b/gtsam/slam/InitializePose.h @@ -0,0 +1,99 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file InitializePose.h + * @author Frank Dellaert + * @date August, 2020 + * @brief common code between lago.* (2D) and InitializePose3.* (3D) + */ + +#pragma once + +#include +#include +#include +#include +#include + +namespace gtsam { +namespace initialize { + +static constexpr Key kAnchorKey = 99999999; + +/** + * Select the subgraph of betweenFactors and transforms priors into between + * wrt a fictitious node + */ +template +static NonlinearFactorGraph buildPoseGraph(const NonlinearFactorGraph& graph) { + NonlinearFactorGraph poseGraph; + + for (const auto& factor : graph) { + // recast to a between on Pose + if (auto between = + boost::dynamic_pointer_cast >(factor)) + poseGraph.add(between); + + // recast PriorFactor to BetweenFactor + if (auto prior = boost::dynamic_pointer_cast >(factor)) + poseGraph.emplace_shared >( + kAnchorKey, prior->keys()[0], prior->prior(), prior->noiseModel()); + } + return poseGraph; +} + +/** + * Use Gauss-Newton optimizer to optimize for poses given rotation estimates + */ +template +static Values computePoses(const Values& initialRot, + NonlinearFactorGraph* posegraph, + bool singleIter = true) { + const auto origin = Pose().translation(); + + // Upgrade rotations to full poses + Values initialPose; + for (const auto& key_value : initialRot) { + Key key = key_value.key; + const auto& rot = initialRot.at(key); + Pose initializedPose = Pose(rot, origin); + initialPose.insert(key, initializedPose); + } + + // add prior on dummy node + auto priorModel = noiseModel::Unit::Create(Pose::dimension); + initialPose.insert(kAnchorKey, Pose()); + posegraph->emplace_shared >(kAnchorKey, Pose(), priorModel); + + // Create optimizer + GaussNewtonParams params; + if (singleIter) { + params.maxIterations = 1; + } else { + params.setVerbosity("TERMINATION"); + } + 
GaussNewtonOptimizer optimizer(*posegraph, initialPose, params); + Values GNresult = optimizer.optimize(); + + // put into Values structure + Values estimate; + for (const auto& key_value : GNresult) { + Key key = key_value.key; + if (key != kAnchorKey) { + const Pose& pose = GNresult.at(key); + estimate.insert(key, pose); + } + } + return estimate; +} +} // namespace initialize +} // namespace gtsam diff --git a/gtsam/slam/InitializePose3.cpp b/gtsam/slam/InitializePose3.cpp index 3d4a4d40d..af1fc609e 100644 --- a/gtsam/slam/InitializePose3.cpp +++ b/gtsam/slam/InitializePose3.cpp @@ -10,28 +10,31 @@ * -------------------------------------------------------------------------- */ /** - * @file InitializePose3.h + * @file InitializePose3.cpp * @author Luca Carlone * @author Frank Dellaert * @date August, 2014 */ #include + +#include #include #include #include #include +#include #include #include #include +#include + using namespace std; namespace gtsam { -static const Key kAnchorKey = symbol('Z', 9999999); - /* ************************************************************************* */ GaussianFactorGraph InitializePose3::buildLinearOrientationGraph(const NonlinearFactorGraph& g) { @@ -62,7 +65,7 @@ GaussianFactorGraph InitializePose3::buildLinearOrientationGraph(const Nonlinear } // prior on the anchor orientation linearGraph.add( - kAnchorKey, I_9x9, + initialize::kAnchorKey, I_9x9, (Vector(9) << 1.0, 0.0, 0.0, /* */ 0.0, 1.0, 0.0, /* */ 0.0, 0.0, 1.0) .finished(), noiseModel::Isotropic::Precision(9, 1)); @@ -78,7 +81,7 @@ Values InitializePose3::normalizeRelaxedRotations( Values validRot3; for(const auto& it: relaxedRot3) { Key key = it.first; - if (key != kAnchorKey) { + if (key != initialize::kAnchorKey) { Matrix3 M; M << Eigen::Map(it.second.data()); // Recover M from vectorized @@ -91,24 +94,10 @@ Values InitializePose3::normalizeRelaxedRotations( } /* ************************************************************************* */ -NonlinearFactorGraph 
InitializePose3::buildPose3graph(const NonlinearFactorGraph& graph) { +NonlinearFactorGraph InitializePose3::buildPose3graph( + const NonlinearFactorGraph& graph) { gttic(InitializePose3_buildPose3graph); - NonlinearFactorGraph pose3Graph; - - for(const auto& factor: graph) { - - // recast to a between on Pose3 - const auto pose3Between = boost::dynamic_pointer_cast >(factor); - if (pose3Between) - pose3Graph.add(pose3Between); - - // recast PriorFactor to BetweenFactor - const auto pose3Prior = boost::dynamic_pointer_cast >(factor); - if (pose3Prior) - pose3Graph.emplace_shared >(kAnchorKey, pose3Prior->keys()[0], - pose3Prior->prior(), pose3Prior->noiseModel()); - } - return pose3Graph; + return initialize::buildPoseGraph(graph); } /* ************************************************************************* */ @@ -134,7 +123,7 @@ Values InitializePose3::computeOrientationsGradient( // this works on the inverse rotations, according to Tron&Vidal,2011 Values inverseRot; - inverseRot.insert(kAnchorKey, Rot3()); + inverseRot.insert(initialize::kAnchorKey, Rot3()); for(const auto& key_value: givenGuess) { Key key = key_value.key; const Pose3& pose = givenGuess.at(key); @@ -145,7 +134,7 @@ Values InitializePose3::computeOrientationsGradient( KeyVectorMap adjEdgesMap; KeyRotMap factorId2RotMap; - createSymbolicGraph(adjEdgesMap, factorId2RotMap, pose3Graph); + createSymbolicGraph(pose3Graph, &adjEdgesMap, &factorId2RotMap); // calculate max node degree & allocate gradient size_t maxNodeDeg = 0; @@ -212,11 +201,11 @@ Values InitializePose3::computeOrientationsGradient( } // enf of gradient iterations // Return correct rotations - const Rot3& Rref = inverseRot.at(kAnchorKey); // This will be set to the identity as so far we included no prior + const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior Values estimateRot; for(const auto& key_value: inverseRot) { Key key = key_value.key; - if (key != 
kAnchorKey) { + if (key != initialize::kAnchorKey) { const Rot3& R = inverseRot.at(key); if(setRefFrame) estimateRot.insert(key, Rref.compose(R.inverse())); @@ -228,32 +217,34 @@ Values InitializePose3::computeOrientationsGradient( } /* ************************************************************************* */ -void InitializePose3::createSymbolicGraph(KeyVectorMap& adjEdgesMap, KeyRotMap& factorId2RotMap, - const NonlinearFactorGraph& pose3Graph) { +void InitializePose3::createSymbolicGraph( + const NonlinearFactorGraph& pose3Graph, KeyVectorMap* adjEdgesMap, + KeyRotMap* factorId2RotMap) { size_t factorId = 0; - for(const auto& factor: pose3Graph) { - auto pose3Between = boost::dynamic_pointer_cast >(factor); - if (pose3Between){ + for (const auto& factor : pose3Graph) { + auto pose3Between = + boost::dynamic_pointer_cast >(factor); + if (pose3Between) { Rot3 Rij = pose3Between->measured().rotation(); - factorId2RotMap.insert(pair(factorId,Rij)); + factorId2RotMap->emplace(factorId, Rij); Key key1 = pose3Between->key1(); - if (adjEdgesMap.find(key1) != adjEdgesMap.end()){ // key is already in - adjEdgesMap.at(key1).push_back(factorId); - }else{ + if (adjEdgesMap->find(key1) != adjEdgesMap->end()) { // key is already in + adjEdgesMap->at(key1).push_back(factorId); + } else { vector edge_id; edge_id.push_back(factorId); - adjEdgesMap.insert(pair >(key1, edge_id)); + adjEdgesMap->emplace(key1, edge_id); } Key key2 = pose3Between->key2(); - if (adjEdgesMap.find(key2) != adjEdgesMap.end()){ // key is already in - adjEdgesMap.at(key2).push_back(factorId); - }else{ + if (adjEdgesMap->find(key2) != adjEdgesMap->end()) { // key is already in + adjEdgesMap->at(key2).push_back(factorId); + } else { vector edge_id; edge_id.push_back(factorId); - adjEdgesMap.insert(pair >(key2, edge_id)); + adjEdgesMap->emplace(key2, edge_id); } - }else{ + } else { cout << "Error in createSymbolicGraph" << endl; } factorId++; @@ -293,50 +284,16 @@ Values 
InitializePose3::initializeOrientations(const NonlinearFactorGraph& graph } ///* ************************************************************************* */ -Values InitializePose3::computePoses(NonlinearFactorGraph& pose3graph, Values& initialRot) { +Values InitializePose3::computePoses(const Values& initialRot, + NonlinearFactorGraph* posegraph, + bool singleIter) { gttic(InitializePose3_computePoses); - - // put into Values structure - Values initialPose; - for (const auto& key_value : initialRot) { - Key key = key_value.key; - const Rot3& rot = initialRot.at(key); - Pose3 initializedPose = Pose3(rot, Point3(0, 0, 0)); - initialPose.insert(key, initializedPose); - } - - // add prior - noiseModel::Unit::shared_ptr priorModel = noiseModel::Unit::Create(6); - initialPose.insert(kAnchorKey, Pose3()); - pose3graph.emplace_shared >(kAnchorKey, Pose3(), priorModel); - - // Create optimizer - GaussNewtonParams params; - bool singleIter = true; - if (singleIter) { - params.maxIterations = 1; - } else { - cout << " \n\n\n\n performing more than 1 GN iterations \n\n\n" << endl; - params.setVerbosity("TERMINATION"); - } - GaussNewtonOptimizer optimizer(pose3graph, initialPose, params); - Values GNresult = optimizer.optimize(); - - // put into Values structure - Values estimate; - for (const auto& key_value : GNresult) { - Key key = key_value.key; - if (key != kAnchorKey) { - const Pose3& pose = GNresult.at(key); - estimate.insert(key, pose); - } - } - return estimate; + return initialize::computePoses(initialRot, posegraph, singleIter); } /* ************************************************************************* */ -Values InitializePose3::initialize(const NonlinearFactorGraph& graph, const Values& givenGuess, - bool useGradient) { +Values InitializePose3::initialize(const NonlinearFactorGraph& graph, + const Values& givenGuess, bool useGradient) { gttic(InitializePose3_initialize); Values initialValues; @@ -352,7 +309,7 @@ Values InitializePose3::initialize(const 
NonlinearFactorGraph& graph, const Valu orientations = computeOrientationsChordal(pose3Graph); // Compute the full poses (1 GN iteration on full poses) - return computePoses(pose3Graph, orientations); + return computePoses(orientations, &pose3Graph); } /* ************************************************************************* */ diff --git a/gtsam/slam/InitializePose3.h b/gtsam/slam/InitializePose3.h index ce1b28854..bf86ab6f2 100644 --- a/gtsam/slam/InitializePose3.h +++ b/gtsam/slam/InitializePose3.h @@ -26,6 +26,9 @@ #include #include +#include +#include + namespace gtsam { typedef std::map > KeyVectorMap; @@ -50,9 +53,9 @@ struct GTSAM_EXPORT InitializePose3 { const NonlinearFactorGraph& pose3Graph, const Values& givenGuess, size_t maxIter = 10000, const bool setRefFrame = true); - static void createSymbolicGraph(KeyVectorMap& adjEdgesMap, - KeyRotMap& factorId2RotMap, - const NonlinearFactorGraph& pose3Graph); + static void createSymbolicGraph(const NonlinearFactorGraph& pose3Graph, + KeyVectorMap* adjEdgesMap, + KeyRotMap* factorId2RotMap); static Vector3 gradientTron(const Rot3& R1, const Rot3& R2, const double a, const double b); @@ -64,8 +67,12 @@ struct GTSAM_EXPORT InitializePose3 { static NonlinearFactorGraph buildPose3graph( const NonlinearFactorGraph& graph); - static Values computePoses(NonlinearFactorGraph& pose3graph, - Values& initialRot); + /** + * Use Gauss-Newton optimizer to optimize for poses given rotation estimates + */ + static Values computePoses(const Values& initialRot, + NonlinearFactorGraph* poseGraph, + bool singleIter = true); /** * "extract" the Pose3 subgraph of the original graph, get orientations from diff --git a/gtsam/slam/KarcherMeanFactor-inl.h b/gtsam/slam/KarcherMeanFactor-inl.h index f10cc7e42..c81a9adc5 100644 --- a/gtsam/slam/KarcherMeanFactor-inl.h +++ b/gtsam/slam/KarcherMeanFactor-inl.h @@ -58,20 +58,22 @@ T FindKarcherMean(std::initializer_list&& rotations) { template template 
-KarcherMeanFactor::KarcherMeanFactor(const CONTAINER& keys, int d) - : NonlinearFactor(keys) { +KarcherMeanFactor::KarcherMeanFactor(const CONTAINER &keys, int d, + boost::optional beta) + : NonlinearFactor(keys), d_(static_cast(d)) { if (d <= 0) { throw std::invalid_argument( "KarcherMeanFactor needs dimension for dynamic types."); } - // Create the constant Jacobian made of D*D identity matrices, - // where D is the dimensionality of the manifold. - const auto I = Eigen::MatrixXd::Identity(d, d); + // Create the constant Jacobian made of d*d identity matrices, + // where d is the dimensionality of the manifold. + Matrix A = Matrix::Identity(d, d); + if (beta) A *= std::sqrt(*beta); std::map terms; for (Key j : keys) { - terms[j] = I; + terms[j] = A; } - jacobian_ = - boost::make_shared(terms, Eigen::VectorXd::Zero(d)); + whitenedJacobian_ = + boost::make_shared(terms, Vector::Zero(d)); } } // namespace gtsam \ No newline at end of file diff --git a/gtsam/slam/KarcherMeanFactor.h b/gtsam/slam/KarcherMeanFactor.h index 54b3930d4..b7cd3b11a 100644 --- a/gtsam/slam/KarcherMeanFactor.h +++ b/gtsam/slam/KarcherMeanFactor.h @@ -30,44 +30,51 @@ namespace gtsam { * PriorFactors. */ template -T FindKarcherMean(const std::vector>& rotations); +T FindKarcherMean(const std::vector> &rotations); -template -T FindKarcherMean(std::initializer_list&& rotations); +template T FindKarcherMean(std::initializer_list &&rotations); /** * The KarcherMeanFactor creates a constraint on all SO(n) variables with * given keys that the Karcher mean (see above) will stay the same. Note the * mean itself is irrelevant to the constraint and is not a parameter: the * constraint is implemented as enforcing that the sum of local updates is - * equal to zero, hence creating a rank-dim constraint. Note it is implemented as - * a soft constraint, as typically it is used to fix a gauge freedom. + * equal to zero, hence creating a rank-dim constraint. 
Note it is implemented + * as a soft constraint, as typically it is used to fix a gauge freedom. * */ -template -class KarcherMeanFactor : public NonlinearFactor { +template class KarcherMeanFactor : public NonlinearFactor { + // Compile time dimension: can be -1 + enum { D = traits::dimension }; + + // Runtime dimension: always >=0 + size_t d_; + /// Constant Jacobian made of d*d identity matrices - boost::shared_ptr jacobian_; + boost::shared_ptr whitenedJacobian_; - enum {D = traits::dimension}; - - public: - /// Construct from given keys. +public: + /** + * Construct from given keys. + * If parameter beta is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(beta). + */ template - KarcherMeanFactor(const CONTAINER& keys, int d=D); + KarcherMeanFactor(const CONTAINER &keys, int d = D, + boost::optional beta = boost::none); /// Destructor virtual ~KarcherMeanFactor() {} /// Calculate the error of the factor: always zero - double error(const Values& c) const override { return 0; } + double error(const Values &c) const override { return 0; } /// get the dimension of the factor (number of rows on linearization) - size_t dim() const override { return D; } + size_t dim() const override { return d_; } /// linearize to a GaussianFactor - boost::shared_ptr linearize(const Values& c) const override { - return jacobian_; + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; } }; // \KarcherMeanFactor -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 3a2d04127..4bd7bc7e2 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -21,21 +21,25 @@ #include #include #include + #include #include #include + +#include +#include + +#include + #include #include -#include -#include -#include -#include + #include #include #include -#include #include #include +#include #include #include @@ -49,14 +53,16 @@ using namespace std; 
namespace fs = boost::filesystem; -using namespace gtsam::symbol_shorthand; +using gtsam::symbol_shorthand::L; +using gtsam::symbol_shorthand::P; +using gtsam::symbol_shorthand::X; #define LINESIZE 81920 namespace gtsam { /* ************************************************************************* */ -string findExampleDataFile(const string& name) { +string findExampleDataFile(const string &name) { // Search source tree and installed location vector rootsToSearch; @@ -71,10 +77,11 @@ string findExampleDataFile(const string& name) { namesToSearch.push_back(name + ".txt"); namesToSearch.push_back(name + ".out"); namesToSearch.push_back(name + ".xml"); + namesToSearch.push_back(name + ".g2o"); // Find first name that exists - for(const fs::path root: rootsToSearch) { - for(const fs::path name: namesToSearch) { + for (const fs::path root : rootsToSearch) { + for (const fs::path name : namesToSearch) { if (fs::is_regular_file(root / name)) return (root / name).string(); } @@ -82,79 +89,167 @@ string findExampleDataFile(const string& name) { // If we did not return already, then we did not find the file throw invalid_argument( - "gtsam::findExampleDataFile could not find a matching file in\n" - GTSAM_SOURCE_TREE_DATASET_DIR " or\n" - GTSAM_INSTALLED_DATASET_DIR " named\n" + name + ", " + name - + ".graph, or " + name + ".txt"); + "gtsam::findExampleDataFile could not find a matching " + "file in\n" GTSAM_SOURCE_TREE_DATASET_DIR + " or\n" GTSAM_INSTALLED_DATASET_DIR " named\n" + + name + ", " + name + ".g2o, " + ", " + name + ".graph, or " + name + + ".txt"); } /* ************************************************************************* */ -string createRewrittenFileName(const string& name) { +string createRewrittenFileName(const string &name) { // Search source tree and installed location if (!exists(fs::path(name))) { throw invalid_argument( - "gtsam::createRewrittenFileName could not find a matching file in\n" - + name); + "gtsam::createRewrittenFileName could not find 
a matching file in\n" + + name); } fs::path p(name); - fs::path newpath = fs::path(p.parent_path().string()) - / fs::path(p.stem().string() + "-rewritten.txt"); + fs::path newpath = fs::path(p.parent_path().string()) / + fs::path(p.stem().string() + "-rewritten.txt"); return newpath.string(); } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, int maxID, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { - return load2D(dataset.first, dataset.second, maxID, addNoise, smart, - noiseFormat, kernelFunctionType); +// Type for parser functions used in parseLines below. +template +using Parser = + std::function(istream &is, const string &tag)>; + +// Parse a file by calling the parse(is, tag) function for every line. +// The result of calling the function is ignored, so typically parse function +// works via a side effect, like adding a factor into a graph etc. +template +static void parseLines(const string &filename, Parser parse) { + ifstream is(filename.c_str()); + if (!is) + throw invalid_argument("parse: can not find file " + filename); + string tag; + while (is >> tag) { + parse(is, tag); // ignore return value + is.ignore(LINESIZE, '\n'); + } } /* ************************************************************************* */ -// Read noise parameters and interpret them according to flags -static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, - NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - double v1, v2, v3, v4, v5, v6; - is >> v1 >> v2 >> v3 >> v4 >> v5 >> v6; +// Parse types T into a size_t-indexed map +template +map parseToMap(const string &filename, Parser> parse, + size_t maxIndex) { + map result; + Parser> emplace = [&](istream &is, const string &tag) { + if (auto t = parse(is, tag)) { + if (!maxIndex || t->first <= maxIndex) + result.emplace(*t); + } + return boost::none; + }; + parseLines(filename, emplace); + 
return result; +} +/* ************************************************************************* */ +// Parse a file and push results on a vector +template +static vector parseToVector(const string &filename, Parser parse) { + vector result; + Parser add = [&result, parse](istream &is, const string &tag) { + if (auto t = parse(is, tag)) + result.push_back(*t); + return boost::none; + }; + parseLines(filename, add); + return result; +} + +/* ************************************************************************* */ +boost::optional parseVertexPose(istream &is, const string &tag) { + if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { + size_t id; + double x, y, yaw; + if (!(is >> id >> x >> y >> yaw)) { + throw std::runtime_error("parseVertexPose encountered malformed line"); + } + return IndexedPose(id, Pose2(x, y, yaw)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose, maxIndex); +} + +/* ************************************************************************* */ +boost::optional parseVertexLandmark(istream &is, + const string &tag) { + if (tag == "VERTEX_XY") { + size_t id; + double x, y; + if (!(is >> id >> x >> y)) { + throw std::runtime_error( + "parseVertexLandmark encountered malformed line"); + } + return IndexedLandmark(id, Point2(x, y)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexLandmark, maxIndex); +} + +/* ************************************************************************* */ +// Interpret noise parameters according to flags +static SharedNoiseModel +createNoiseModel(const Vector6 v, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { if (noiseFormat == NoiseFormatAUTO) { // Try to guess covariance matrix layout - if (v1 != 0.0 && v2 == 0.0 && v3 
!= 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 == 0.0) { - // NoiseFormatGRAPH + if (v(0) != 0.0 && v(1) == 0.0 && v(2) != 0.0 && // + v(3) != 0.0 && v(4) == 0.0 && v(5) == 0.0) { noiseFormat = NoiseFormatGRAPH; - } else if (v1 != 0.0 && v2 == 0.0 && v3 == 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 != 0.0) { - // NoiseFormatCOV + } else if (v(0) != 0.0 && v(1) == 0.0 && v(2) == 0.0 && // + v(3) != 0.0 && v(4) == 0.0 && v(5) != 0.0) { noiseFormat = NoiseFormatCOV; } else { throw std::invalid_argument( - "load2D: unrecognized covariance matrix format in dataset file. Please specify the noise format."); + "load2D: unrecognized covariance matrix format in dataset file. " + "Please specify the noise format."); } } // Read matrix and check that diagonal entries are non-zero - Matrix M(3, 3); + Matrix3 M; switch (noiseFormat) { case NoiseFormatG2O: case NoiseFormatCOV: - // i.e., [ v1 v2 v3; v2' v4 v5; v3' v5' v6 ] - if (v1 == 0.0 || v4 == 0.0 || v6 == 0.0) + // i.e., [v(0) v(1) v(2); + // v(1)' v(3) v(4); + // v(2)' v(4)' v(5) ] + if (v(0) == 0.0 || v(3) == 0.0 || v(5) == 0.0) throw runtime_error( "load2D::readNoiseModel looks like this is not G2O matrix order"); - M << v1, v2, v3, v2, v4, v5, v3, v5, v6; + M << v(0), v(1), v(2), v(1), v(3), v(4), v(2), v(4), v(5); break; case NoiseFormatTORO: case NoiseFormatGRAPH: // http://www.openslam.org/toro.html // inf_ff inf_fs inf_ss inf_rr inf_fr inf_sr - // i.e., [ v1 v2 v5; v2' v3 v6; v5' v6' v4 ] - if (v1 == 0.0 || v3 == 0.0 || v4 == 0.0) + // i.e., [v(0) v(1) v(4); + // v(1)' v(2) v(5); + // v(4)' v(5)' v(3) ] + if (v(0) == 0.0 || v(2) == 0.0 || v(3) == 0.0) throw invalid_argument( "load2D::readNoiseModel looks like this is not TORO matrix order"); - M << v1, v2, v5, v2, v3, v6, v5, v6, v4; + M << v(0), v(1), v(4), v(1), v(2), v(5), v(4), v(5), v(3); break; default: throw runtime_error("load2D: invalid noise format"); @@ -195,363 +290,445 @@ static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, } } -#ifdef 
GTSAM_ALLOW_DEPRECATED_SINCE_V41 /* ************************************************************************* */ -boost::optional parseVertex(istream& is, const string& tag) { - return parseVertexPose(is, tag); -} -#endif +boost::optional parseEdge(istream &is, const string &tag) { + if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || + (tag == "ODOMETRY")) { -/* ************************************************************************* */ -boost::optional parseVertexPose(istream& is, const string& tag) { - if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { - Key id; + size_t id1, id2; double x, y, yaw; - is >> id >> x >> y >> yaw; - return IndexedPose(id, Pose2(x, y, yaw)); + if (!(is >> id1 >> id2 >> x >> y >> yaw)) { + throw std::runtime_error("parseEdge encountered malformed line"); + } + return IndexedEdge({id1, id2}, Pose2(x, y, yaw)); } else { return boost::none; } } /* ************************************************************************* */ -boost::optional parseVertexLandmark(istream& is, const string& tag) { - if (tag == "VERTEX_XY") { - Key id; - double x, y; - is >> id >> x >> y; - return IndexedLandmark(id, Point2(x, y)); - } else { - return boost::none; +// Measurement parsers are implemented as a functor, as they have several +// options to filter and create the measurement model. 
+template struct ParseMeasurement; + +/* ************************************************************************* */ +// Converting from Measurement to BetweenFactor is generic +template struct ParseFactor : ParseMeasurement { + ParseFactor(const ParseMeasurement &parent) + : ParseMeasurement(parent) {} + + // We parse a measurement then convert + typename boost::optional::shared_ptr> + operator()(istream &is, const string &tag) { + if (auto m = ParseMeasurement::operator()(is, tag)) + return boost::make_shared>( + m->key1(), m->key2(), m->measured(), m->noiseModel()); + else + return boost::none; } +}; + +/* ************************************************************************* */ +// Pose2 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + size_t maxIndex; + + // For noise model creation + bool smart; + NoiseFormat noiseFormat; + KernelFunctionType kernelFunctionType; + + // If this is not null, will use instead of parsed model: + SharedNoiseModel model; + + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + auto edge = parseEdge(is, tag); + if (!edge) + return boost::none; + + // parse noise model + Vector6 v; + is >> v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); + + // optional filter + size_t id1, id2; + tie(id1, id2) = edge->first; + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) + return boost::none; + + // Get pose and optionally add noise + Pose2 &pose = edge->second; + if (sampler) + pose = pose.retract(sampler->sample()); + + // emplace measurement + auto modelFromFile = + createNoiseModel(v, smart, noiseFormat, kernelFunctionType); + return BinaryMeasurement(id1, id2, pose, + model ? 
model : modelFromFile); + } +}; + +/* ************************************************************************* */ +// Create a sampler to corrupt a measurement +boost::shared_ptr createSampler(const SharedNoiseModel &model) { + auto noise = boost::dynamic_pointer_cast(model); + if (!noise) + throw invalid_argument("gtsam::load: invalid noise model for adding noise" + "(current version assumes diagonal noise model)!"); + return boost::shared_ptr(new Sampler(noise)); } /* ************************************************************************* */ -boost::optional parseEdge(istream& is, const string& tag) { - if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") - || (tag == "ODOMETRY")) { - - Key id1, id2; - double x, y, yaw; - is >> id1 >> id2 >> x >> y >> yaw; - return IndexedEdge(pair(id1, id2), Pose2(x, y, yaw)); - } else { - return boost::none; - } +// Implementation of parseMeasurements for Pose2 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? 
createSampler(model) : nullptr, + maxIndex, true, NoiseFormatAUTO, + KernelFunctionTypeNONE}; + return parseToVector>(filename, parse); } /* ************************************************************************* */ -GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { +// Implementation of parseMeasurements for Rot2, which converts from Pose2 - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("load2D: can not find file " + filename); +// Extract Rot2 measurement from Pose2 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose2 measurements " + "with Gaussian noise models."); + const Matrix3 M = gaussian->covariance(); + return BinaryMeasurement(p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Isotropic::Variance(1, M(2, 2))); +} - Values::shared_ptr initial(new Values); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} - string tag; +/* ************************************************************************* */ +// Implementation of parseFactors for Pose2 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? 
createSampler(model) : nullptr, maxIndex, + true, NoiseFormatAUTO, KernelFunctionTypeNONE, + nullptr}); + return parseToVector::shared_ptr>(filename, parse); +} - // load the poses and landmarks - while (!is.eof()) { - if (!(is >> tag)) - break; +/* ************************************************************************* */ +using BearingRange2D = BearingRange; - const auto indexed_pose = parseVertexPose(is, tag); - if (indexed_pose) { - Key id = indexed_pose->first; +template <> struct ParseMeasurement { + // arguments + size_t maxIndex; - // optional filter - if (maxID && id >= maxID) - continue; + // The actual parser + boost::optional> + operator()(istream &is, const string &tag) { + if (tag != "BR" && tag != "LANDMARK") + return boost::none; - initial->insert(id, indexed_pose->second); - } - const auto indexed_landmark = parseVertexLandmark(is, tag); - if (indexed_landmark) { - Key id = indexed_landmark->first; - - // optional filter - if (maxID && id >= maxID) - continue; - - initial->insert(id, indexed_landmark->second); - } - is.ignore(LINESIZE, '\n'); - } - is.clear(); /* clears the end-of-file and error flags */ - is.seekg(0, ios::beg); - - // If asked, create a sampler with random number generator - std::unique_ptr sampler; - if (addNoise) { - noiseModel::Diagonal::shared_ptr noise; - if (model) - noise = boost::dynamic_pointer_cast(model); - if (!noise) - throw invalid_argument( - "gtsam::load2D: invalid noise model for adding noise" - "(current version assumes diagonal noise model)!"); - sampler.reset(new Sampler(noise)); - } - - // Parse the pose constraints - Key id1, id2; - bool haveLandmark = false; - const bool useModelInFile = !model; - while (!is.eof()) { - if (!(is >> tag)) - break; - - auto between_pose = parseEdge(is, tag); - if (between_pose) { - std::tie(id1, id2) = between_pose->first; - Pose2& l1Xl2 = between_pose->second; - - // read noise model - SharedNoiseModel modelInFile = readNoiseModel(is, smart, noiseFormat, - 
kernelFunctionType); - - // optional filter - if (maxID && (id1 >= maxID || id2 >= maxID)) - continue; - - if (useModelInFile) - model = modelInFile; - - if (addNoise) - l1Xl2 = l1Xl2.retract(sampler->sample()); - - // Insert vertices if pure odometry file - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(id2)) - initial->insert(id2, initial->at(id1) * l1Xl2); - - NonlinearFactor::shared_ptr factor( - new BetweenFactor(id1, id2, l1Xl2, model)); - graph->push_back(factor); - } - // Parse measurements + size_t id1, id2; + is >> id1 >> id2; double bearing, range, bearing_std, range_std; - // A bearing-range measurement if (tag == "BR") { - is >> id1 >> id2 >> bearing >> range >> bearing_std >> range_std; + is >> bearing >> range >> bearing_std >> range_std; } - // A landmark measurement, TODO Frank says: don't know why is converted to bearing-range + // A landmark measurement, converted to bearing-range if (tag == "LANDMARK") { double lmx, lmy; double v1, v2, v3; - is >> id1 >> id2 >> lmx >> lmy >> v1 >> v2 >> v3; + is >> lmx >> lmy >> v1 >> v2 >> v3; // Convert x,y to bearing,range bearing = atan2(lmy, lmx); range = sqrt(lmx * lmx + lmy * lmy); - // In our experience, the x-y covariance on landmark sightings is not very good, so assume - // it describes the uncertainty at a range of 10m, and convert that to bearing/range uncertainty. + // In our experience, the x-y covariance on landmark sightings is not + // very good, so assume it describes the uncertainty at a range of 10m, + // and convert that to bearing/range uncertainty. if (std::abs(v1 - v3) < 1e-4) { bearing_std = sqrt(v1 / 10.0); range_std = sqrt(v1); } else { + // TODO(frank): we are ignoring the non-uniform covariance bearing_std = 1; range_std = 1; - if (!haveLandmark) { - cout - << "Warning: load2D is a very simple dataset loader and is ignoring the\n" - "non-uniform covariance on LANDMARK measurements in this file." 
- << endl; - haveLandmark = true; - } } } - // Do some common stuff for bearing-range measurements - if (tag == "LANDMARK" || tag == "BR") { + // optional filter + if (maxIndex && id1 > maxIndex) + return boost::none; - // optional filter - if (maxID && id1 >= maxID) - continue; + // Create noise model + auto measurementNoise = noiseModel::Diagonal::Sigmas( + (Vector(2) << bearing_std, range_std).finished()); - // Create noise model - noiseModel::Diagonal::shared_ptr measurementNoise = - noiseModel::Diagonal::Sigmas((Vector(2) << bearing_std, range_std).finished()); + return BinaryMeasurement( + id1, L(id2), BearingRange2D(bearing, range), measurementNoise); + } +}; - // Add to graph - *graph += BearingRangeFactor(id1, L(id2), bearing, range, - measurementNoise); +/* ************************************************************************* */ +GraphAndValues load2D(const string &filename, SharedNoiseModel model, + size_t maxIndex, bool addNoise, bool smart, + NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + + // Single pass for poses and landmarks. + auto initial = boost::make_shared(); + Parser insert = [maxIndex, &initial](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose(is, tag)) { + if (!maxIndex || indexedPose->first <= maxIndex) + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexLandmark(is, tag)) { + if (!maxIndex || indexedLandmark->first <= maxIndex) + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } + return 0; + }; + parseLines(filename, insert); + + // Single pass for Pose2 and bearing-range factors. + auto graph = boost::make_shared(); + + // Instantiate factor parser + ParseFactor parseBetweenFactor( + {addNoise ? 
createSampler(model) : nullptr, maxIndex, smart, noiseFormat, + kernelFunctionType, model}); + + // Instantiate bearing-range parser + ParseMeasurement parseBearingRange{maxIndex}; + + // Combine in a single parser that adds factors to `graph`, but also inserts + // new variables into `initial` when needed. + Parser parse = [&](istream &is, const string &tag) { + if (auto f = parseBetweenFactor(is, tag)) { + graph->push_back(*f); + + // Insert vertices if pure odometry file + Key key1 = (*f)->key1(), key2 = (*f)->key2(); + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) + initial->insert(key2, initial->at(key1) * (*f)->measured()); + } else if (auto m = parseBearingRange(is, tag)) { + Key key1 = m->key1(), key2 = m->key2(); + BearingRange2D br = m->measured(); + graph->emplace_shared>(key1, key2, br, + m->noiseModel()); // Insert poses or points if they do not exist yet - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(L(id2))) { - Pose2 pose = initial->at(id1); - Point2 local(cos(bearing) * range, sin(bearing) * range); + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) { + Pose2 pose = initial->at(key1); + Point2 local = br.bearing() * Point2(br.range(), 0); Point2 global = pose.transformFrom(local); - initial->insert(L(id2), global); + initial->insert(key2, global); } } - is.ignore(LINESIZE, '\n'); - } + return 0; + }; + + parseLines(filename, parse); return make_pair(graph, initial); } /* ************************************************************************* */ -GraphAndValues load2D_robust(const string& filename, - noiseModel::Base::shared_ptr& model, int maxID) { - return load2D(filename, model, maxID); +GraphAndValues load2D(pair dataset, size_t maxIndex, + bool addNoise, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + return load2D(dataset.first, dataset.second, maxIndex, addNoise, smart, + 
noiseFormat, kernelFunctionType); } /* ************************************************************************* */ -void save2D(const NonlinearFactorGraph& graph, const Values& config, - const noiseModel::Diagonal::shared_ptr model, const string& filename) { +GraphAndValues load2D_robust(const string &filename, + const noiseModel::Base::shared_ptr &model, + size_t maxIndex) { + return load2D(filename, model, maxIndex); +} + +/* ************************************************************************* */ +void save2D(const NonlinearFactorGraph &graph, const Values &config, + const noiseModel::Diagonal::shared_ptr model, + const string &filename) { fstream stream(filename.c_str(), fstream::out); // save poses - - for(const Values::ConstKeyValuePair& key_value: config) { - const Pose2& pose = key_value.value.cast(); + for (const Values::ConstKeyValuePair key_value : config) { + const Pose2 &pose = key_value.value.cast(); stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() - << " " << pose.theta() << endl; + << " " << pose.theta() << endl; } // save edges - Matrix R = model->R(); - Matrix RR = trans(R) * R; //prod(trans(R),R); - for(boost::shared_ptr factor_: graph) { - boost::shared_ptr > factor = - boost::dynamic_pointer_cast >(factor_); + // TODO(frank): why don't we use model in factor? 
+ Matrix3 R = model->R(); + Matrix3 RR = R.transpose() * R; + for (auto f : graph) { + auto factor = boost::dynamic_pointer_cast>(f); if (!factor) continue; - Pose2 pose = factor->measured().inverse(); + const Pose2 pose = factor->measured().inverse(); stream << "EDGE2 " << factor->key2() << " " << factor->key1() << " " - << pose.x() << " " << pose.y() << " " << pose.theta() << " " << RR(0, 0) - << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) << " " - << RR(0, 2) << " " << RR(1, 2) << endl; + << pose.x() << " " << pose.y() << " " << pose.theta() << " " + << RR(0, 0) << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) + << " " << RR(0, 2) << " " << RR(1, 2) << endl; } stream.close(); } /* ************************************************************************* */ -GraphAndValues readG2o(const string& g2oFile, const bool is3D, +GraphAndValues readG2o(const string &g2oFile, const bool is3D, KernelFunctionType kernelFunctionType) { if (is3D) { return load3D(g2oFile); } else { // just call load2D - int maxID = 0; + size_t maxIndex = 0; bool addNoise = false; bool smart = true; - return load2D(g2oFile, SharedNoiseModel(), maxID, addNoise, smart, + return load2D(g2oFile, SharedNoiseModel(), maxIndex, addNoise, smart, NoiseFormatG2O, kernelFunctionType); } } /* ************************************************************************* */ -void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, - const string& filename) { +void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, + const string &filename) { fstream stream(filename.c_str(), fstream::out); + // Use a lambda here to more easily modify behavior in future. 
+ auto index = [](gtsam::Key key) { return Symbol(key).index(); }; + // save 2D poses - for (const auto& key_value : estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose2& pose = p->value(); - stream << "VERTEX_SE2 " << key_value.key << " " << pose.x() << " " - << pose.y() << " " << pose.theta() << endl; + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose2 &pose = p->value(); + stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " " + << pose.y() << " " << pose.theta() << endl; } // save 3D poses - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose3& pose = p->value(); + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose3 &pose = p->value(); const Point3 t = pose.translation(); const auto q = pose.rotation().toQuaternion(); - stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " " - << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " - << q.z() << " " << q.w() << endl; + stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " " << t.x() << " " + << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " + << q.z() << " " << q.w() << endl; } // save 2D landmarks - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Point2& point = p->value(); - stream << "VERTEX_XY " << key_value.key << " " << point.x() << " " - << point.y() << endl; + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point2 &point = p->value(); + stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " " + << point.y() << endl; } // save 3D landmarks - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Point3& 
point = p->value(); - stream << "VERTEX_TRACKXYZ " << key_value.key << " " << point.x() << " " - << point.y() << " " << point.z() << endl; + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point3 &point = p->value(); + stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x() + << " " << point.y() << " " << point.z() << endl; } // save edges (2D or 3D) - for(const auto& factor_: graph) { - boost::shared_ptr > factor = - boost::dynamic_pointer_cast >(factor_); - if (factor){ + for (const auto &factor_ : graph) { + auto factor = boost::dynamic_pointer_cast>(factor_); + if (factor) { SharedNoiseModel model = factor->noiseModel(); - boost::shared_ptr gaussianModel = + auto gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); + Matrix3 Info = gaussianModel->R().transpose() * gaussianModel->R(); Pose2 pose = factor->measured(); //.inverse(); - stream << "EDGE_SE2 " << factor->key1() << " " << factor->key2() << " " - << pose.x() << " " << pose.y() << " " << pose.theta(); - for (int i = 0; i < 3; i++){ - for (int j = i; j < 3; j++){ + stream << "EDGE_SE2 " << index(factor->key1()) << " " + << index(factor->key2()) << " " << pose.x() << " " << pose.y() + << " " << pose.theta(); + for (size_t i = 0; i < 3; i++) { + for (size_t j = i; j < 3; j++) { stream << " " << Info(i, j); } } stream << endl; } - boost::shared_ptr< BetweenFactor > factor3D = - boost::dynamic_pointer_cast< BetweenFactor >(factor_); + auto factor3D = boost::dynamic_pointer_cast>(factor_); - if (factor3D){ + if (factor3D) { SharedNoiseModel model = factor3D->noiseModel(); boost::shared_ptr gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw 
invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); + Matrix6 Info = gaussianModel->R().transpose() * gaussianModel->R(); const Pose3 pose3D = factor3D->measured(); const Point3 p = pose3D.translation(); const auto q = pose3D.rotation().toQuaternion(); - stream << "EDGE_SE3:QUAT " << factor3D->key1() << " " << factor3D->key2() - << " " << p.x() << " " << p.y() << " " << p.z() << " " << q.x() - << " " << q.y() << " " << q.z() << " " << q.w(); + stream << "EDGE_SE3:QUAT " << index(factor3D->key1()) << " " + << index(factor3D->key2()) << " " << p.x() << " " << p.y() << " " + << p.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " + << q.w(); - Matrix InfoG2o = I_6x6; - InfoG2o.block(0,0,3,3) = Info.block(3,3,3,3); // cov translation - InfoG2o.block(3,3,3,3) = Info.block(0,0,3,3); // cov rotation - InfoG2o.block(0,3,3,3) = Info.block(0,3,3,3); // off diagonal - InfoG2o.block(3,0,3,3) = Info.block(3,0,3,3); // off diagonal + Matrix6 InfoG2o = I_6x6; + InfoG2o.block<3, 3>(0, 0) = Info.block<3, 3>(3, 3); // cov translation + InfoG2o.block<3, 3>(3, 3) = Info.block<3, 3>(0, 0); // cov rotation + InfoG2o.block<3, 3>(0, 3) = Info.block<3, 3>(0, 3); // off diagonal + InfoG2o.block<3, 3>(3, 0) = Info.block<3, 3>(3, 0); // off diagonal - for (int i = 0; i < 6; i++){ - for (int j = i; j < 6; j++){ + for (size_t i = 0; i < 6; i++) { + for (size_t j = i; j < 6; j++) { stream << " " << InfoG2o(i, j); } } @@ -562,153 +739,215 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, } /* ************************************************************************* */ -static Rot3 NormalizedRot3(double w, double x, double y, double z) { +// parse quaternion in x,y,z,w order, and normalize to unit length +istream &operator>>(istream &is, Quaternion &q) { + double x, y, z, w; + is >> x >> y >> z >> w; const double norm = sqrt(w * w + x * x + y * y + z * z), f = 1.0 / norm; - return 
Rot3::Quaternion(f * w, f * x, f * y, f * z); + q = Quaternion(f * w, f * x, f * y, f * z); + return is; } /* ************************************************************************* */ -std::map parse3DPoses(const string& filename) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse3DPoses: can not find file " + filename); - - std::map poses; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "VERTEX3") { - Key id; - double x, y, z, roll, pitch, yaw; - ls >> id >> x >> y >> z >> roll >> pitch >> yaw; - poses.emplace(id, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z})); - } - if (tag == "VERTEX_SE3:QUAT") { - Key id; - double x, y, z, qx, qy, qz, qw; - ls >> id >> x >> y >> z >> qx >> qy >> qz >> qw; - poses.emplace(id, Pose3(NormalizedRot3(qw, qx, qy, qz), {x, y, z})); - } - } - return poses; +// parse Rot3 from roll, pitch, yaw +istream &operator>>(istream &is, Rot3 &R) { + double yaw, pitch, roll; + is >> roll >> pitch >> yaw; // notice order ! 
+ R = Rot3::Ypr(yaw, pitch, roll); + return is; } /* ************************************************************************* */ -std::map parse3DLandmarks(const string& filename) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse3DLandmarks: can not find file " + filename); +boost::optional> parseVertexPose3(istream &is, + const string &tag) { + if (tag == "VERTEX3") { + size_t id; + double x, y, z; + Rot3 R; + is >> id >> x >> y >> z >> R; + return make_pair(id, Pose3(R, {x, y, z})); + } else if (tag == "VERTEX_SE3:QUAT") { + size_t id; + double x, y, z; + Quaternion q; + is >> id >> x >> y >> z >> q; + return make_pair(id, Pose3(q, {x, y, z})); + } else + return boost::none; +} - std::map landmarks; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "VERTEX_TRACKXYZ") { - Key id; - double x, y, z; - ls >> id >> x >> y >> z; - landmarks.emplace(id, Point3(x, y, z)); - } - } - return landmarks; +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose3, maxIndex); } /* ************************************************************************* */ -BetweenFactorPose3s parse3DFactors( - const string& filename, - const noiseModel::Diagonal::shared_ptr& corruptingNoise) { - ifstream is(filename.c_str()); - if (!is) throw invalid_argument("parse3DFactors: can not find file " + filename); +boost::optional> parseVertexPoint3(istream &is, + const string &tag) { + if (tag == "VERTEX_TRACKXYZ") { + size_t id; + double x, y, z; + is >> id >> x >> y >> z; + return make_pair(id, Point3(x, y, z)); + } else + return boost::none; +} - boost::optional sampler; - if (corruptingNoise) { - sampler = Sampler(corruptingNoise); - } +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPoint3, maxIndex); +} - 
std::vector::shared_ptr> factors; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; +/* ************************************************************************* */ +// Parse a symmetric covariance matrix (onlyupper-triangular part is stored) +istream &operator>>(istream &is, Matrix6 &m) { + for (size_t i = 0; i < 6; i++) + for (size_t j = i; j < 6; j++) { + is >> m(i, j); + m(j, i) = m(i, j); + } + return is; +} +/* ************************************************************************* */ +// Pose3 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + size_t maxIndex; + + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + if (tag != "EDGE3" && tag != "EDGE_SE3:QUAT") + return boost::none; + + // parse ids and optionally filter + size_t id1, id2; + is >> id1 >> id2; + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) + return boost::none; + + Matrix6 m; if (tag == "EDGE3") { - Key id1, id2; - double x, y, z, roll, pitch, yaw; - ls >> id1 >> id2 >> x >> y >> z >> roll >> pitch >> yaw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) - for (size_t j = i; j < 6; j++) ls >> m(i, j); - SharedNoiseModel model = noiseModel::Gaussian::Information(m); - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z}), model)); - } - if (tag == "EDGE_SE3:QUAT") { - Key id1, id2; - double x, y, z, qx, qy, qz, qw; - ls >> id1 >> id2 >> x >> y >> z >> qx >> qy >> qz >> qw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) { - for (size_t j = i; j < 6; j++) { - double mij; - ls >> mij; - m(i, j) = mij; - m(j, i) = mij; - } - } - Matrix mgtsam(6, 6); + double x, y, z; + Rot3 R; + is >> x >> y >> z >> R >> m; + Pose3 T12(R, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); - mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov 
rotation - mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation - mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal - mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal + return BinaryMeasurement(id1, id2, T12, + noiseModel::Gaussian::Information(m)); + } else if (tag == "EDGE_SE3:QUAT") { + double x, y, z; + Quaternion q; + is >> x >> y >> z >> q >> m; + Pose3 T12(q, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); + + // EDGE_SE3:QUAT stores covariance in t,R order, unlike GTSAM: + Matrix6 mgtsam; + mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation + mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation + mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal + mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); - auto R12 = NormalizedRot3(qw, qx, qy, qz); - if (sampler) { - R12 = R12.retract(sampler->sample()); - } - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(R12, {x, y, z}), model)); - } + return BinaryMeasurement( + id1, id2, T12, noiseModel::Gaussian::Information(mgtsam)); + } else + return boost::none; } - return factors; +}; + +/* ************************************************************************* */ +// Implementation of parseMeasurements for Pose3 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? 
createSampler(model) : nullptr, + maxIndex}; + return parseToVector>(filename, parse); } /* ************************************************************************* */ -GraphAndValues load3D(const string& filename) { - const auto factors = parse3DFactors(filename); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); - for (const auto& factor : factors) { - graph->push_back(factor); - } +// Implementation of parseMeasurements for Rot3, which converts from Pose3 - Values::shared_ptr initial(new Values); +// Extract Rot3 measurement from Pose3 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose3 measurements " + "with Gaussian noise models."); + const Matrix6 M = gaussian->covariance(); + return BinaryMeasurement( + p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Gaussian::Covariance(M.block<3, 3>(0, 0), true)); +} - const auto poses = parse3DPoses(filename); - for (const auto& key_pose : poses) { - initial->insert(key_pose.first, key_pose.second); - } - const auto landmarks = parse3DLandmarks(filename); - for (const auto& key_landmark : landmarks) { - initial->insert(key_landmark.first, key_landmark.second); - } +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} + +/* ************************************************************************* */ +// Implementation of parseFactors for Pose3 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? 
createSampler(model) : nullptr, maxIndex}); + return parseToVector::shared_ptr>(filename, parse); +} + +/* ************************************************************************* */ +GraphAndValues load3D(const string &filename) { + auto graph = boost::make_shared(); + auto initial = boost::make_shared(); + + // Instantiate factor parser. maxIndex is always zero for load3D. + ParseFactor parseFactor({nullptr, 0}); + + // Single pass for variables and factors. Unlike 2D version, does *not* insert + // variables into `initial` if referenced but not present. + Parser parse = [&](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose3(is, tag)) { + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexPoint3(is, tag)) { + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } else if (auto factor = parseFactor(is, tag)) { + graph->push_back(*factor); + } + return 0; + }; + parseLines(filename, parse); return make_pair(graph, initial); } /* ************************************************************************* */ -Rot3 openGLFixedRotation() { // this is due to different convention for cameras in gtsam and openGL +Rot3 openGLFixedRotation() { // this is due to different convention for + // cameras in gtsam and openGL /* R = [ 1 0 0 * 0 -1 0 * 0 0 -1] @@ -721,7 +960,7 @@ Rot3 openGLFixedRotation() { // this is due to different convention for cameras } /* ************************************************************************* */ -Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { +Pose3 openGL2gtsam(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 wRc = (R.inverse()).compose(R90); @@ -730,7 +969,7 @@ Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { 
+Pose3 gtsam2openGL(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 cRw_openGL = R90.compose(R.inverse()); Point3 t_openGL = cRw_openGL.rotate(Point3(-tx, -ty, -tz)); @@ -738,13 +977,13 @@ Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Pose3& PoseGTSAM) { +Pose3 gtsam2openGL(const Pose3 &PoseGTSAM) { return gtsam2openGL(PoseGTSAM.rotation(), PoseGTSAM.x(), PoseGTSAM.y(), - PoseGTSAM.z()); + PoseGTSAM.z()); } /* ************************************************************************* */ -bool readBundler(const string& filename, SfmData &data) { +bool readBundler(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -779,7 +1018,7 @@ bool readBundler(const string& filename, SfmData &data) { // Check for all-zero R, in which case quit if (r11 == 0 && r12 == 0 && r13 == 0) { cout << "Error in readBundler: zero rotation matrix for pose " << i - << endl; + << endl; return false; } @@ -831,7 +1070,7 @@ bool readBundler(const string& filename, SfmData &data) { } /* ************************************************************************* */ -bool readBAL(const string& filename, SfmData &data) { +bool readBAL(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -879,7 +1118,7 @@ bool readBAL(const string& filename, SfmData &data) { // Get the 3D position float x, y, z; is >> x >> y >> z; - SfmTrack& track = data.tracks[j]; + SfmTrack &track = data.tracks[j]; track.p = Point3(x, y, z); track.r = 0.4f; track.g = 0.4f; @@ -891,7 +1130,7 @@ bool readBAL(const string& filename, SfmData &data) { } /* ************************************************************************* */ -bool writeBAL(const string& filename, SfmData &data) { +bool writeBAL(const string &filename, SfmData 
&data) { // Open the output file ofstream os; os.open(filename.c_str()); @@ -909,29 +1148,32 @@ bool writeBAL(const string& filename, SfmData &data) { // Write observations os << data.number_cameras() << " " << data.number_tracks() << " " - << nrObservations << endl; + << nrObservations << endl; os << endl; for (size_t j = 0; j < data.number_tracks(); j++) { // for each 3D point j - const SfmTrack& track = data.tracks[j]; + const SfmTrack &track = data.tracks[j]; - for (size_t k = 0; k < track.number_measurements(); k++) { // for each observation of the 3D point j + for (size_t k = 0; k < track.number_measurements(); + k++) { // for each observation of the 3D point j size_t i = track.measurements[k].first; // camera id double u0 = data.cameras[i].calibration().u0(); double v0 = data.cameras[i].calibration().v0(); if (u0 != 0 || v0 != 0) { - cout - << "writeBAL has not been tested for calibration with nonzero (u0,v0)" - << endl; + cout << "writeBAL has not been tested for calibration with nonzero " + "(u0,v0)" + << endl; } - double pixelBALx = track.measurements[k].second.x() - u0; // center of image is the origin - double pixelBALy = -(track.measurements[k].second.y() - v0); // center of image is the origin + double pixelBALx = track.measurements[k].second.x() - + u0; // center of image is the origin + double pixelBALy = -(track.measurements[k].second.y() - + v0); // center of image is the origin Point2 pixelMeasurement(pixelBALx, pixelBALy); - os << i /*camera id*/<< " " << j /*point id*/<< " " - << pixelMeasurement.x() /*u of the pixel*/<< " " - << pixelMeasurement.y() /*v of the pixel*/<< endl; + os << i /*camera id*/ << " " << j /*point id*/ << " " + << pixelMeasurement.x() /*u of the pixel*/ << " " + << pixelMeasurement.y() /*v of the pixel*/ << endl; } } os << endl; @@ -964,46 +1206,47 @@ bool writeBAL(const string& filename, SfmData &data) { return true; } -bool writeBALfromValues(const string& filename, const SfmData &data, - Values& values) { +bool 
writeBALfromValues(const string &filename, const SfmData &data, + Values &values) { using Camera = PinholeCamera; SfmData dataValues = data; // Store poses or cameras in SfmData size_t nrPoses = values.count(); - if (nrPoses == dataValues.number_cameras()) { // we only estimated camera poses - for (size_t i = 0; i < dataValues.number_cameras(); i++) { // for each camera - Key poseKey = symbol('x', i); - Pose3 pose = values.at(poseKey); + if (nrPoses == + dataValues.number_cameras()) { // we only estimated camera poses + for (size_t i = 0; i < dataValues.number_cameras(); + i++) { // for each camera + Pose3 pose = values.at(X(i)); Cal3Bundler K = dataValues.cameras[i].calibration(); Camera camera(pose, K); dataValues.cameras[i] = camera; } } else { size_t nrCameras = values.count(); - if (nrCameras == dataValues.number_cameras()) { // we only estimated camera poses and calibration - for (size_t i = 0; i < nrCameras; i++) { // for each camera - Key cameraKey = i; // symbol('c',i); + if (nrCameras == dataValues.number_cameras()) { // we only estimated camera + // poses and calibration + for (size_t i = 0; i < nrCameras; i++) { // for each camera + Key cameraKey = i; // symbol('c',i); Camera camera = values.at(cameraKey); dataValues.cameras[i] = camera; } } else { - cout - << "writeBALfromValues: different number of cameras in SfM_dataValues (#cameras " - << dataValues.number_cameras() << ") and values (#cameras " - << nrPoses << ", #poses " << nrCameras << ")!!" - << endl; + cout << "writeBALfromValues: different number of cameras in " + "SfM_dataValues (#cameras " + << dataValues.number_cameras() << ") and values (#cameras " + << nrPoses << ", #poses " << nrCameras << ")!!" 
<< endl; return false; } } // Store 3D points in SfmData - size_t nrPoints = values.count(), nrTracks = dataValues.number_tracks(); + size_t nrPoints = values.count(), + nrTracks = dataValues.number_tracks(); if (nrPoints != nrTracks) { - cout - << "writeBALfromValues: different number of points in SfM_dataValues (#points= " - << nrTracks << ") and values (#points " - << nrPoints << ")!!" << endl; + cout << "writeBALfromValues: different number of points in " + "SfM_dataValues (#points= " + << nrTracks << ") and values (#points " << nrPoints << ")!!" << endl; } for (size_t j = 0; j < nrTracks; j++) { // for each point @@ -1015,7 +1258,7 @@ bool writeBALfromValues(const string& filename, const SfmData &data, dataValues.tracks[j].r = 1.0; dataValues.tracks[j].g = 0.0; dataValues.tracks[j].b = 0.0; - dataValues.tracks[j].p = Point3(0,0,0); + dataValues.tracks[j].p = Point3(0, 0, 0); } } @@ -1023,22 +1266,46 @@ bool writeBALfromValues(const string& filename, const SfmData &data, return writeBAL(filename, dataValues); } -Values initialCamerasEstimate(const SfmData& db) { +Values initialCamerasEstimate(const SfmData &db) { Values initial; size_t i = 0; // NO POINTS: j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert(i++, camera); return initial; } -Values initialCamerasAndPointsEstimate(const SfmData& db) { +Values initialCamerasAndPointsEstimate(const SfmData &db) { Values initial; size_t i = 0, j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert((i++), camera); - for(const SfmTrack& track: db.tracks) + for (const SfmTrack &track : db.tracks) initial.insert(P(j++), track.p); return initial; } -} // \namespace gtsam +// Wrapper-friendly versions of parseFactors and parseFactors +BetweenFactorPose2s +parse2DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, size_t maxIndex) { + return parseFactors(filename, model, 
maxIndex); +} + +BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, size_t maxIndex) { + return parseFactors(filename, model, maxIndex); +} + +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +std::map parse3DPoses(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); +} + +std::map parse3DLandmarks(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); +} +#endif +} // namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 30fa913ba..7ceca00f4 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -20,13 +20,12 @@ #pragma once +#include #include #include #include -#include -#include +#include #include -#include #include #include #include @@ -74,20 +73,42 @@ enum KernelFunctionType { KernelFunctionTypeNONE, KernelFunctionTypeHUBER, KernelFunctionTypeTUKEY }; -/// Return type for auxiliary functions -typedef std::pair IndexedPose; -typedef std::pair IndexedLandmark; -typedef std::pair, Pose2> IndexedEdge; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /** - * Parse TORO/G2O vertex "id x y yaw" - * @param is input stream - * @param tag string parsed from input stream, will only parse if vertex type + * Parse variables in a line-based text format (like g2o) into a map. + * Instantiated in .cpp Pose2, Point2, Pose3, and Point3. + * Note the map keys are integer indices, *not* gtsam::Keys. This is is + * different below where landmarks will use L(index) symbols. */ -GTSAM_EXPORT boost::optional parseVertex(std::istream& is, - const std::string& tag); -#endif +template +GTSAM_EXPORT std::map parseVariables(const std::string &filename, + size_t maxIndex = 0); + +/** + * Parse binary measurements in a line-based text format (like g2o) into a + * vector. Instantiated in .cpp for Pose2, Rot2, Pose3, and Rot3. 
The rotation + * versions parse poses and extract only the rotation part, using the marginal + * covariance as noise model. + */ +template +GTSAM_EXPORT std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +/** + * Parse BetweenFactors in a line-based text format (like g2o) into a vector of + * shared pointers. Instantiated in .cpp T equal to Pose2 and Pose3. + */ +template +GTSAM_EXPORT std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +/// Return type for auxiliary functions +typedef std::pair IndexedPose; +typedef std::pair IndexedLandmark; +typedef std::pair, Pose2> IndexedEdge; /** * Parse TORO/G2O vertex "id x y yaw" @@ -102,7 +123,6 @@ GTSAM_EXPORT boost::optional parseVertexPose(std::istream& is, * @param is input stream * @param tag string parsed from input stream, will only parse if vertex type */ - GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& is, const std::string& tag); @@ -114,18 +134,21 @@ GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& GTSAM_EXPORT boost::optional parseEdge(std::istream& is, const std::string& tag); -/// Return type for load functions -typedef std::pair GraphAndValues; +/// Return type for load functions, which return a graph and initial values. For +/// landmarks, the gtsam::Symbol L(index) is used to insert into the Values. +/// Bearing-range measurements also refer to landmarks with L(index). 
+using GraphAndValues = + std::pair; /** * Load TORO 2D Graph * @param dataset/model pair as constructed by [dataset] - * @param maxID if non-zero cut out vertices >= maxID + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model */ GTSAM_EXPORT GraphAndValues load2D( - std::pair dataset, int maxID = 0, + std::pair dataset, size_t maxIndex = 0, bool addNoise = false, bool smart = true, // NoiseFormat noiseFormat = NoiseFormatAUTO, @@ -135,7 +158,7 @@ GTSAM_EXPORT GraphAndValues load2D( * Load TORO/G2O style graph files * @param filename * @param model optional noise model to use instead of one specified by file - * @param maxID if non-zero cut out vertices >= maxID + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model * @param noiseFormat how noise parameters are stored @@ -143,13 +166,13 @@ GTSAM_EXPORT GraphAndValues load2D( * @return graph and initial values */ GTSAM_EXPORT GraphAndValues load2D(const std::string& filename, - SharedNoiseModel model = SharedNoiseModel(), Key maxID = 0, bool addNoise = + SharedNoiseModel model = SharedNoiseModel(), size_t maxIndex = 0, bool addNoise = false, bool smart = true, NoiseFormat noiseFormat = NoiseFormatAUTO, // KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE); /// @deprecated load2D now allows for arbitrary models and wrapping a robust kernel GTSAM_EXPORT GraphAndValues load2D_robust(const std::string& filename, - noiseModel::Base::shared_ptr& model, int maxID = 0); + const noiseModel::Base::shared_ptr& model, size_t maxIndex = 0); /** save 2d graph */ GTSAM_EXPORT void save2D(const NonlinearFactorGraph& graph, @@ -173,21 +196,15 @@ GTSAM_EXPORT GraphAndValues readG2o(const std::string& g2oFile, const bool is3D * @param filename The name of the g2o file to write * @param 
graph NonlinearFactor graph storing the measurements * @param estimate Values + * + * Note:behavior change in PR #471: to be consistent with load2D and load3D, we + * write the *indices* to file and not the full Keys. This change really only + * affects landmarks, which get read as indices but stored in values with the + * symbol L(index). */ GTSAM_EXPORT void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, const std::string& filename); -/// Parse edges in 3D TORO graph file into a set of BetweenFactors. -using BetweenFactorPose3s = std::vector::shared_ptr>; -GTSAM_EXPORT BetweenFactorPose3s parse3DFactors(const std::string& filename, - const noiseModel::Diagonal::shared_ptr& corruptingNoise=nullptr); - -/// Parse vertices in 3D TORO/g2o graph file into a map of Pose3s. -GTSAM_EXPORT std::map parse3DPoses(const std::string& filename); - -/// Parse landmarks in 3D g2o graph file into a map of Point3s. -GTSAM_EXPORT std::map parse3DLandmarks(const string& filename); - /// Load TORO 3D Graph GTSAM_EXPORT GraphAndValues load3D(const std::string& filename); @@ -324,4 +341,30 @@ GTSAM_EXPORT Values initialCamerasEstimate(const SfmData& db); */ GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfmData& db); +// Wrapper-friendly versions of parseFactors and parseFactors +using BetweenFactorPose2s = std::vector::shared_ptr>; +GTSAM_EXPORT BetweenFactorPose2s +parse2DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +using BetweenFactorPose3s = std::vector::shared_ptr>; +GTSAM_EXPORT BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +inline boost::optional parseVertex(std::istream &is, + const std::string &tag) { + return parseVertexPose(is, tag); +} + +GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, + size_t maxIndex = 
0); + +GTSAM_EXPORT std::map +parse3DLandmarks(const std::string &filename, size_t maxIndex = 0); + +#endif } // namespace gtsam diff --git a/gtsam/slam/expressions.h b/gtsam/slam/expressions.h index d60923d8e..680f2d175 100644 --- a/gtsam/slam/expressions.h +++ b/gtsam/slam/expressions.h @@ -75,30 +75,6 @@ inline Unit3_ unrotate(const Rot3_& x, const Unit3_& p) { return Unit3_(x, &Rot3::unrotate, p); } -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -namespace internal { -// define a rotate and unrotate for Vector3 -inline Vector3 rotate(const Rot3& R, const Vector3& v, - OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) { - return R.rotate(v, H1, H2); -} -inline Vector3 unrotate(const Rot3& R, const Vector3& v, - OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) { - return R.unrotate(v, H1, H2); -} -} // namespace internal -inline Expression rotate(const Rot3_& R, - const Expression& v) { - return Expression(internal::rotate, R, v); -} -inline Expression unrotate(const Rot3_& R, - const Expression& v) { - return Expression(internal::unrotate, R, v); -} -#endif - // Projection typedef Expression Cal3_S2_; diff --git a/gtsam/slam/lago.cpp b/gtsam/slam/lago.cpp index 76edc8b9d..70caa424f 100644 --- a/gtsam/slam/lago.cpp +++ b/gtsam/slam/lago.cpp @@ -17,6 +17,8 @@ */ #include + +#include #include #include #include @@ -33,7 +35,6 @@ namespace lago { static const Matrix I = I_1x1; static const Matrix I3 = I_3x3; -static const Key keyAnchor = symbol('Z', 9999999); static const noiseModel::Diagonal::shared_ptr priorOrientationNoise = noiseModel::Diagonal::Sigmas((Vector(1) << 0).finished()); static const noiseModel::Diagonal::shared_ptr priorPose2Noise = @@ -79,7 +80,7 @@ key2doubleMap computeThetasToRoot(const key2doubleMap& deltaThetaMap, key2doubleMap thetaToRootMap; // Orientation of the roo - thetaToRootMap.insert(pair(keyAnchor, 0.0)); + thetaToRootMap.insert(pair(initialize::kAnchorKey, 0.0)); // for all 
nodes in the tree for(const key2doubleMap::value_type& it: deltaThetaMap) { @@ -189,41 +190,16 @@ GaussianFactorGraph buildLinearOrientationGraph( lagoGraph.add(key1, -I, key2, I, deltaThetaRegularized, model_deltaTheta); } // prior on the anchor orientation - lagoGraph.add(keyAnchor, I, (Vector(1) << 0.0).finished(), priorOrientationNoise); + lagoGraph.add(initialize::kAnchorKey, I, (Vector(1) << 0.0).finished(), priorOrientationNoise); return lagoGraph; } -/* ************************************************************************* */ -// Select the subgraph of betweenFactors and transforms priors into between wrt a fictitious node -static NonlinearFactorGraph buildPose2graph(const NonlinearFactorGraph& graph) { - gttic(lago_buildPose2graph); - NonlinearFactorGraph pose2Graph; - - for(const boost::shared_ptr& factor: graph) { - - // recast to a between on Pose2 - boost::shared_ptr > pose2Between = - boost::dynamic_pointer_cast >(factor); - if (pose2Between) - pose2Graph.add(pose2Between); - - // recast PriorFactor to BetweenFactor - boost::shared_ptr > pose2Prior = - boost::dynamic_pointer_cast >(factor); - if (pose2Prior) - pose2Graph.add( - BetweenFactor(keyAnchor, pose2Prior->keys()[0], - pose2Prior->prior(), pose2Prior->noiseModel())); - } - return pose2Graph; -} - /* ************************************************************************* */ static PredecessorMap findOdometricPath( const NonlinearFactorGraph& pose2Graph) { PredecessorMap tree; - Key minKey = keyAnchor; // this initialization does not matter + Key minKey = initialize::kAnchorKey; // this initialization does not matter bool minUnassigned = true; for(const boost::shared_ptr& factor: pose2Graph) { @@ -240,8 +216,8 @@ static PredecessorMap findOdometricPath( minKey = key1; } } - tree.insert(minKey, keyAnchor); - tree.insert(keyAnchor, keyAnchor); // root + tree.insert(minKey, initialize::kAnchorKey); + tree.insert(initialize::kAnchorKey, initialize::kAnchorKey); // root return tree; } @@ -284,7 
+260,7 @@ VectorValues initializeOrientations(const NonlinearFactorGraph& graph, // We "extract" the Pose2 subgraph of the original graph: this // is done to properly model priors and avoiding operating on a larger graph - NonlinearFactorGraph pose2Graph = buildPose2graph(graph); + NonlinearFactorGraph pose2Graph = initialize::buildPoseGraph(graph); // Get orientations from relative orientation measurements return computeOrientations(pose2Graph, useOdometricPath); @@ -338,7 +314,7 @@ Values computePoses(const NonlinearFactorGraph& pose2graph, } } // add prior - linearPose2graph.add(keyAnchor, I3, Vector3(0.0, 0.0, 0.0), + linearPose2graph.add(initialize::kAnchorKey, I3, Vector3(0.0, 0.0, 0.0), priorPose2Noise); // optimize @@ -348,7 +324,7 @@ Values computePoses(const NonlinearFactorGraph& pose2graph, Values initialGuessLago; for(const VectorValues::value_type& it: posesLago) { Key key = it.first; - if (key != keyAnchor) { + if (key != initialize::kAnchorKey) { const Vector& poseVector = it.second; Pose2 poseLago = Pose2(poseVector(0), poseVector(1), orientationsLago.at(key)(0) + poseVector(2)); @@ -364,7 +340,7 @@ Values initialize(const NonlinearFactorGraph& graph, bool useOdometricPath) { // We "extract" the Pose2 subgraph of the original graph: this // is done to properly model priors and avoiding operating on a larger graph - NonlinearFactorGraph pose2Graph = buildPose2graph(graph); + NonlinearFactorGraph pose2Graph = initialize::buildPoseGraph(graph); // Get orientations from relative orientation measurements VectorValues orientationsLago = computeOrientations(pose2Graph, @@ -385,7 +361,7 @@ Values initialize(const NonlinearFactorGraph& graph, // for all nodes in the tree for(const VectorValues::value_type& it: orientations) { Key key = it.first; - if (key != keyAnchor) { + if (key != initialize::kAnchorKey) { const Pose2& pose = initialGuess.at(key); const Vector& orientation = it.second; Pose2 poseLago = Pose2(pose.x(), pose.y(), orientation(0)); diff --git 
a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index b6b1776d2..aad9124c5 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -82,38 +82,73 @@ TEST( dataSet, parseEdge) const auto actual = parseEdge(is, tag); EXPECT(actual); if (actual) { - pair expected(0, 1); + pair expected(0, 1); EXPECT(expected == actual->first); EXPECT(assert_equal(Pose2(2, 3, 4), actual->second)); } } /* ************************************************************************* */ -TEST( dataSet, load2D) -{ +TEST(dataSet, load2D) { ///< The structure where we will save the SfM data const string filename = findExampleDataFile("w100.graph"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(300,graph->size()); - EXPECT_LONGS_EQUAL(100,initial->size()); - noiseModel::Unit::shared_ptr model = noiseModel::Unit::Create(3); - BetweenFactor expected(1, 0, Pose2(-0.99879,0.0417574,-0.00818381), model); - BetweenFactor::shared_ptr actual = boost::dynamic_pointer_cast< - BetweenFactor >(graph->at(0)); + EXPECT_LONGS_EQUAL(300, graph->size()); + EXPECT_LONGS_EQUAL(100, initial->size()); + auto model = noiseModel::Unit::Create(3); + BetweenFactor expected(1, 0, Pose2(-0.99879, 0.0417574, -0.00818381), + model); + BetweenFactor::shared_ptr actual = + boost::dynamic_pointer_cast>(graph->at(0)); EXPECT(assert_equal(expected, *actual)); + + // Check binary measurements, Pose2 + size_t maxIndex = 5; + auto measurements = parseMeasurements(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, measurements.size()); + + // Check binary measurements, Rot2 + auto measurements2 = parseMeasurements(filename); + EXPECT_LONGS_EQUAL(300, measurements2.size()); + + // // Check factor parsing + const auto actualFactors = parseFactors(filename); + for (size_t i : {0, 1, 2, 3, 4, 5}) { + EXPECT(assert_equal( + *boost::dynamic_pointer_cast>(graph->at(i)), + *actualFactors[i], 
1e-5)); + } + + // Check pose parsing + const auto actualPoses = parseVariables(filename); + for (size_t j : {0, 1, 2, 3, 4}) { + EXPECT(assert_equal(initial->at(j), actualPoses.at(j), 1e-5)); + } + + // Check landmark parsing + const auto actualLandmarks = parseVariables(filename); + EXPECT_LONGS_EQUAL(0, actualLandmarks.size()); } /* ************************************************************************* */ -TEST( dataSet, load2DVictoriaPark) -{ +TEST(dataSet, load2DVictoriaPark) { const string filename = findExampleDataFile("victoria_park.txt"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; + + // Load all boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(10608,graph->size()); - EXPECT_LONGS_EQUAL(7120,initial->size()); + EXPECT_LONGS_EQUAL(10608, graph->size()); + EXPECT_LONGS_EQUAL(7120, initial->size()); + + // Restrict keys + size_t maxIndex = 5; + boost::tie(graph, initial) = load2D(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, graph->size()); + EXPECT_LONGS_EQUAL(6, initial->size()); // file has 0 as well + EXPECT_LONGS_EQUAL(L(5), graph->at(4)->keys()[1]); } /* ************************************************************************* */ @@ -184,7 +219,7 @@ TEST(dataSet, readG2o3D) { } // Check factor parsing - const auto actualFactors = parse3DFactors(g2oFile); + const auto actualFactors = parseFactors(g2oFile); for (size_t i : {0, 1, 2, 3, 4, 5}) { EXPECT(assert_equal( *boost::dynamic_pointer_cast>(expectedGraph[i]), @@ -192,13 +227,13 @@ TEST(dataSet, readG2o3D) { } // Check pose parsing - const auto actualPoses = parse3DPoses(g2oFile); + const auto actualPoses = parseVariables(g2oFile); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualPoses.at(j), 1e-5)); } // Check landmark parsing - const auto actualLandmarks = parse3DLandmarks(g2oFile); + const auto actualLandmarks = parseVariables(g2oFile); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualPoses.at(j), 
1e-5)); } @@ -259,7 +294,7 @@ TEST(dataSet, readG2oCheckDeterminants) { const string g2oFile = findExampleDataFile("toyExample.g2o"); // Check determinants in factors - auto factors = parse3DFactors(g2oFile); + auto factors = parseFactors(g2oFile); EXPECT_LONGS_EQUAL(6, factors.size()); for (const auto& factor : factors) { const Rot3 R = factor->measured().rotation(); @@ -267,13 +302,13 @@ TEST(dataSet, readG2oCheckDeterminants) { } // Check determinants in initial values - const map poses = parse3DPoses(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(5, poses.size()); for (const auto& key_value : poses) { const Rot3 R = key_value.second.rotation(); EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); } - const map landmarks = parse3DLandmarks(g2oFile); + const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(0, landmarks.size()); } @@ -282,10 +317,13 @@ TEST(dataSet, readG2oLandmarks) { const string g2oFile = findExampleDataFile("example_with_vertices.g2o"); // Check number of poses and landmarks. Should be 8 each. 
- const map poses = parse3DPoses(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, poses.size()); - const map landmarks = parse3DLandmarks(g2oFile); + const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, landmarks.size()); + + auto graphAndValues = load3D(g2oFile); + EXPECT(graphAndValues.second->exists(L(0))); } /* ************************************************************************* */ @@ -538,14 +576,12 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ Values value; for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera - Key poseKey = symbol('x',i); Pose3 pose = poseChange.compose(readData.cameras[i].pose()); - value.insert(poseKey, pose); + value.insert(X(i), pose); } for(size_t j=0; j < readData.number_tracks(); j++){ // for each point - Key pointKey = P(j); Point3 point = poseChange.transformFrom( readData.tracks[j].p ); - value.insert(pointKey, point); + value.insert(P(j), point); } // Write values and readData to a file @@ -570,13 +606,11 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ EXPECT(assert_equal(expected,actual,12)); Pose3 expectedPose = camera0.pose(); - Key poseKey = symbol('x',0); - Pose3 actualPose = value.at(poseKey); + Pose3 actualPose = value.at(X(0)); EXPECT(assert_equal(expectedPose,actualPose, 1e-7)); Point3 expectedPoint = track0.p; - Key pointKey = P(0); - Point3 actualPoint = value.at(pointKey); + Point3 actualPoint = value.at(P(0)); EXPECT(assert_equal(expectedPoint,actualPoint, 1e-6)); } diff --git a/gtsam/slam/tests/testFrobeniusFactor.cpp b/gtsam/slam/tests/testFrobeniusFactor.cpp index 9cb0c19fa..321b54c86 100644 --- a/gtsam/slam/tests/testFrobeniusFactor.cpp +++ b/gtsam/slam/tests/testFrobeniusFactor.cpp @@ -188,54 +188,6 @@ TEST(FrobeniusBetweenFactorSO4, evaluateError) { EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } -//****************************************************************************** -namespace submanifold { -SO4 id; -Vector6 v1 = 
(Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); -SO3 R1 = SO3::Expmap(v1.tail<3>()); -SO4 Q1 = SO4::Expmap(v1); -Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); -SO3 R2 = SO3::Expmap(v2.tail<3>()); -SO4 Q2 = SO4::Expmap(v2); -SO3 R12 = R1.between(R2); -} // namespace submanifold - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, evaluateError) { - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // dimension = 6 not 16 - for (const size_t p : {5, 4, 3}) { - Matrix M = Matrix::Identity(p, p); - M.topLeftCorner(3, 3) = submanifold::R1.matrix(); - SOn Q1(M); - M.topLeftCorner(3, 3) = submanifold::R2.matrix(); - SOn Q2(M); - auto factor = - FrobeniusWormholeFactor(1, 2, Rot3(::so3::R12.matrix()), p, model); - Matrix H1, H2; - factor.evaluateError(Q1, Q2, H1, H2); - - // Test derivatives - Values values; - values.insert(1, Q1); - values.insert(2, Q2); - EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); - } -} - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, equivalenceToSO3) { - using namespace ::submanifold; - auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension - auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); - auto factor4 = FrobeniusWormholeFactor(1, 2, Rot3(R12.matrix()), 4, model); - const Matrix3 E3(factor3.evaluateError(R1, R2).data()); - const Matrix43 E4( - factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); - EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); - EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); -} - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/slam/tests/testInitializePose.cpp b/gtsam/slam/tests/testInitializePose.cpp new file mode 100644 index 000000000..1373cb576 --- /dev/null 
+++ b/gtsam/slam/tests/testInitializePose.cpp @@ -0,0 +1,82 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testInitializePose3.cpp + * @brief Unit tests for 3D SLAM initialization, using rotation relaxation + * + * @author Luca Carlone + * @author Frank Dellaert + * @date August, 2014 + */ + +#include + +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +TEST(InitializePose3, computePoses2D) { + const string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + NonlinearFactorGraph::shared_ptr inputGraph; + Values::shared_ptr posesInFile; + bool is3D = false; + boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, is3D); + + auto priorModel = noiseModel::Unit::Create(3); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + + auto I = genericValue(Rot3()); + Values orientations; + for (size_t i : {0, 1, 2, 3}) + orientations.insert(i, posesInFile->at(i).rotation()); + const Values poses = initialize::computePoses(orientations, &poseGraph); + + // posesInFile is seriously noisy, so we check error of recovered poses + EXPECT_DOUBLES_EQUAL(0.0810283, inputGraph->error(poses), 1e-6); +} + +/* ************************************************************************* */ +TEST(InitializePose3, computePoses3D) { + const string g2oFile = findExampleDataFile("Klaus3"); + NonlinearFactorGraph::shared_ptr inputGraph; + Values::shared_ptr posesInFile; + bool is3D = true; + boost::tie(inputGraph, posesInFile) = 
readG2o(g2oFile, is3D); + + auto priorModel = noiseModel::Unit::Create(6); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + + auto I = genericValue(Rot3()); + Values orientations; + for (size_t i : {0, 1, 2}) + orientations.insert(i, posesInFile->at(i).rotation()); + Values poses = initialize::computePoses(orientations, &poseGraph); + EXPECT(assert_equal(*posesInFile, poses, 0.1)); // TODO(frank): very loose !! +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/slam/tests/testInitializePose3.cpp b/gtsam/slam/tests/testInitializePose3.cpp index ba41cdc9b..01ec9edb1 100644 --- a/gtsam/slam/tests/testInitializePose3.cpp +++ b/gtsam/slam/tests/testInitializePose3.cpp @@ -121,7 +121,7 @@ TEST( InitializePose3, orientationsGradientSymbolicGraph ) { KeyVectorMap adjEdgesMap; KeyRotMap factorId2RotMap; - InitializePose3::createSymbolicGraph(adjEdgesMap, factorId2RotMap, pose3Graph); + InitializePose3::createSymbolicGraph(pose3Graph, &adjEdgesMap, &factorId2RotMap); EXPECT_DOUBLES_EQUAL(adjEdgesMap.at(x0)[0], 0, 1e-9); EXPECT_DOUBLES_EQUAL(adjEdgesMap.at(x0)[1], 3, 1e-9); @@ -258,21 +258,21 @@ TEST( InitializePose3, posesWithGivenGuess ) { } /* ************************************************************************* */ -TEST( InitializePose3, initializePoses ) -{ +TEST(InitializePose3, initializePoses) { const string g2oFile = findExampleDataFile("pose3example-grid"); NonlinearFactorGraph::shared_ptr inputGraph; - Values::shared_ptr expectedValues; + Values::shared_ptr posesInFile; bool is3D = true; - boost::tie(inputGraph, expectedValues) = readG2o(g2oFile, is3D); - noiseModel::Unit::shared_ptr priorModel = noiseModel::Unit::Create(6); + boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, 
is3D); + + auto priorModel = noiseModel::Unit::Create(6); inputGraph->addPrior(0, Pose3(), priorModel); Values initial = InitializePose3::initialize(*inputGraph); - EXPECT(assert_equal(*expectedValues,initial,0.1)); // TODO(frank): very loose !! + EXPECT(assert_equal(*posesInFile, initial, + 0.1)); // TODO(frank): very loose !! } - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt index 010b32710..ec161baa8 100644 --- a/gtsam_unstable/CMakeLists.txt +++ b/gtsam_unstable/CMakeLists.txt @@ -108,7 +108,7 @@ list(APPEND GTSAM_EXPORTED_TARGETS gtsam_unstable) set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) # Wrap version for gtsam_unstable -if (GTSAM_INSTALL_MATLAB_TOOLBOX) +if (GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX) # Set up codegen include(GtsamMatlabWrap) @@ -119,8 +119,8 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX) endif() # Wrap - wrap_and_install_library(gtsam_unstable.h "gtsam" "" "${mexFlags}") -endif(GTSAM_INSTALL_MATLAB_TOOLBOX) + wrap_and_install_library(gtsam_unstable.i "gtsam" "" "${mexFlags}") +endif(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX) # Build examples diff --git a/gtsam_unstable/gtsam_unstable.h b/gtsam_unstable/gtsam_unstable.i similarity index 99% rename from gtsam_unstable/gtsam_unstable.h rename to gtsam_unstable/gtsam_unstable.i index ef2d16bf0..05b30bb0b 100644 --- a/gtsam_unstable/gtsam_unstable.h +++ b/gtsam_unstable/gtsam_unstable.i @@ -12,10 +12,14 @@ class gtsam::Point2Vector; class gtsam::Rot2; class gtsam::Pose2; class gtsam::Point3; +class gtsam::SO3; +class gtsam::SO4; +class gtsam::SOn; class gtsam::Rot3; class gtsam::Pose3; virtual class gtsam::noiseModel::Base; virtual class gtsam::noiseModel::Gaussian; +virtual class gtsam::noiseModel::Isotropic; virtual class gtsam::imuBias::ConstantBias; virtual class gtsam::NonlinearFactor; virtual class gtsam::NoiseModelFactor; @@ -39,6 +43,7 @@ class 
gtsam::KeyVector; class gtsam::LevenbergMarquardtParams; class gtsam::ISAM2Params; class gtsam::GaussianDensity; +class gtsam::LevenbergMarquardtOptimizer; namespace gtsam { @@ -282,7 +287,6 @@ virtual class PriorFactor : gtsam::NoiseModelFactor { void serializable() const; // enabling serialization functionality }; - #include template virtual class BetweenFactor : gtsam::NoiseModelFactor { diff --git a/gtsam_unstable/slam/BetweenFactorEM.h b/gtsam_unstable/slam/BetweenFactorEM.h index d551209c9..43607ac41 100644 --- a/gtsam_unstable/slam/BetweenFactorEM.h +++ b/gtsam_unstable/slam/BetweenFactorEM.h @@ -114,7 +114,7 @@ public: /** implement functions needed to derive from Factor */ /* ************************************************************************* */ - virtual double error(const Values& x) const { + double error(const Values &x) const override { return whitenedError(x).squaredNorm(); } @@ -125,8 +125,7 @@ public: * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ */ /* This version of linearize recalculates the noise model each time */ - virtual boost::shared_ptr linearize( - const Values& x) const { + boost::shared_ptr linearize(const Values &x) const override { // Only linearize if the factor is active if (!this->active(x)) return boost::shared_ptr(); diff --git a/gtsam_unstable/timing/process_shonan_timing_results.py b/gtsam_unstable/timing/process_shonan_timing_results.py new file mode 100644 index 000000000..9cf934dba --- /dev/null +++ b/gtsam_unstable/timing/process_shonan_timing_results.py @@ -0,0 +1,215 @@ +""" +Process timing results from timeShonanAveraging +""" + +import xlrd +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import FuncFormatter +import heapq +from collections import Counter + +def make_combined_plot(name, p_values, times, costs, min_cost_range=10): + """ Make a plot that combines timing and SO(3) cost. 
+ Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + Will calculate the range of the costs, default minimum range = 10.0 + """ + min_cost = min(costs) + cost_range = max(max(costs)-min_cost,min_cost_range) + fig = plt.figure() + ax1 = fig.add_subplot(111) + ax1.plot(p_values, times, label="time") + ax1.set_ylabel('Time used to optimize \ seconds') + ax1.set_xlabel('p_value') + ax2 = ax1.twinx() + ax2.plot(p_values, costs, 'r', label="cost") + ax2.set_ylabel('Cost at SO(3) form') + ax2.set_xlabel('p_value') + ax2.set_xticks(p_values) + ax2.set_ylim(min_cost, min_cost + cost_range) + plt.title(name, fontsize=12) + ax1.legend(loc="upper left") + ax2.legend(loc="upper right") + plt.interactive(False) + plt.show() + +def make_convergence_plot(name, p_values, times, costs, iter=10): + """ Make a bar that show the success rate for each p_value according to whether the SO(3) cost converges + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + iter: int of iteration number for each p_value + """ + + max_cost = np.mean(np.array(heapq.nlargest(iter, costs))) + # calculate mean costs for each p value + p_values = list(dict(Counter(p_values)).keys()) + # make sure the iter number + iter = int(len(times)/len(p_values)) + p_mean_cost = [np.mean(np.array(costs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_max = p_values[p_mean_cost.index(max(p_mean_cost))] + # print(p_mean_cost) + # print(p_max) + + #take mean and make the combined plot + mean_times = [] + mean_costs = [] + for p in p_values: + costs_tmp = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_cost = sum(costs_tmp)/len(costs_tmp) + mean_costs.append(mean_cost) + times_tmp = times[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_time = sum(times_tmp)/len(times_tmp) + mean_times.append(mean_time) + 
make_combined_plot(name, p_values,mean_times, mean_costs) + + # calculate the convergence rate for each p_value + p_success_rates = [] + if p_mean_cost[0] >= 0.95*np.mean(np.array(costs)) and p_mean_cost[0] <= 1.05*np.mean(np.array(costs)): + p_success_rates = [ 1.0 for p in p_values] + else: + for p in p_values: + if p > p_max: + p_costs = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + # print(p_costs) + converged = [ int(p_cost < 0.3*max_cost) for p_cost in p_costs] + success_rate = sum(converged)/len(converged) + p_success_rates.append(success_rate) + else: + p_success_rates.append(0) + + plt.bar(p_values, p_success_rates, align='center', alpha=0.5) + plt.xticks(p_values) + plt.yticks(np.arange(0, 1.2, 0.2), ['0%', '20%', '40%', '60%', '80%', '100%']) + plt.xlabel("p_value") + plt.ylabel("success rate") + plt.title(name, fontsize=12) + plt.show() + +def make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds): + """ Make a plot that combines time for optimizing, time for optimizing and compute min_eigen, + min_eigen, subound (subound = (f_R - f_SDP) / f_SDP). 
+ Arguments: + name: string of the plot title + p_values: list of p-values (int) + times1: list of timings (seconds) + costPs: f_SDP + cost3s: f_R + times2: list of timings (seconds) + min_eigens: list of min_eigen (double) + subounds: list of subound (double) + """ + + if dict(Counter(p_values))[5] != 1: + p_values = list(dict(Counter(p_values)).keys()) + iter = int(len(times1)/len(p_values)) + p_mean_times1 = [np.mean(np.array(times1[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_times2 = [np.mean(np.array(times2[i*iter:(i+1)*iter])) for i in range(len(p_values))] + print("p_values \n", p_values) + print("p_mean_times_opti \n", p_mean_times1) + print("p_mean_times_eig \n", p_mean_times2) + + p_mean_costPs = [np.mean(np.array(costPs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_cost3s = [np.mean(np.array(cost3s[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_lambdas = [np.mean(np.array(min_eigens[i*iter:(i+1)*iter])) for i in range(len(p_values))] + + print("p_mean_costPs \n", p_mean_costPs) + print("p_mean_cost3s \n", p_mean_cost3s) + print("p_mean_lambdas \n", p_mean_lambdas) + print("*******************************************************************************************************************") + + + else: + plt.figure(1) + plt.ylabel('Time used (seconds)') + plt.xlabel('p_value') + plt.plot(p_values, times1, 'r', label="time for optimizing") + plt.plot(p_values, times2, 'blue', label="time for optimizing and check") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(2) + plt.ylabel('Min eigen_value') + plt.xlabel('p_value') + plt.plot(p_values, min_eigens, 'r', label="min_eigen values") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(3) + plt.ylabel('sub_bounds') + plt.xlabel('p_value') + plt.plot(p_values, subounds, 'blue', label="sub_bounds") + plt.title(name, fontsize=12) + 
plt.legend(loc="best") + plt.show() + +# Process arguments and call plot function +import argparse +import csv +import os + +parser = argparse.ArgumentParser() +parser.add_argument("path") +args = parser.parse_args() + + +file_path = [] +domain = os.path.abspath(args.path) +for info in os.listdir(args.path): + file_path.append(os.path.join(domain, info)) +file_path.sort() +print(file_path) + + +# name of all the plots +names = {} +names[0] = 'tinyGrid3D vertex = 9, edge = 11' +names[1] = 'smallGrid3D vertex = 125, edge = 297' +names[2] = 'parking-garage vertex = 1661, edge = 6275' +names[3] = 'sphere2500 vertex = 2500, edge = 4949' +# names[4] = 'sphere_bignoise vertex = 2200, edge = 8647' +names[5] = 'torus3D vertex = 5000, edge = 9048' +names[6] = 'cubicle vertex = 5750, edge = 16869' +names[7] = 'rim vertex = 10195, edge = 29743' + +# Parse CSV file +for key, name in names.items(): + print(key, name) + + # find according file to process + name_file = None + for path in file_path: + if name[0:3] in path: + name_file = path + if name_file == None: + print("The file %s is not in the path" % name) + continue + + p_values, times1, costPs, cost3s, times2, min_eigens, subounds = [],[],[],[],[],[],[] + with open(name_file) as csvfile: + reader = csv.reader(csvfile, delimiter='\t') + for row in reader: + print(row) + p_values.append(int(row[0])) + times1.append(float(row[1])) + costPs.append(float(row[2])) + cost3s.append(float(row[3])) + if len(row) > 4: + times2.append(float(row[4])) + min_eigens.append(float(row[5])) + subounds.append(float(row[6])) + + #plot + # make_combined_plot(name, p_values, times1, cost3s) + # make_convergence_plot(name, p_values, times1, cost3s) + make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds) diff --git a/gtsam_unstable/timing/timeShonanAveraging.cpp b/gtsam_unstable/timing/timeShonanAveraging.cpp new file mode 100644 index 000000000..d42635404 --- /dev/null +++ 
b/gtsam_unstable/timing/timeShonanAveraging.cpp @@ -0,0 +1,180 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date September 2019 + * @author Jing Wu + * @author Frank Dellaert + * @brief Timing script for Shonan Averaging algorithm + */ + +#include "gtsam/base/Matrix.h" +#include "gtsam/base/Vector.h" +#include "gtsam/geometry/Point3.h" +#include "gtsam/geometry/Rot3.h" +#include +#include + +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +// save a single line of timing info to an output stream +void saveData(size_t p, double time1, double costP, double cost3, double time2, + double min_eigenvalue, double suBound, std::ostream* os) { + *os << static_cast(p) << "\t" << time1 << "\t" << costP << "\t" << cost3 + << "\t" << time2 << "\t" << min_eigenvalue << "\t" << suBound << endl; +} + +void checkR(const Matrix& R) { + Matrix R2 = R.transpose(); + Matrix actual_R = R2 * R; + assert_equal(Rot3(), Rot3(actual_R)); +} + +void saveResult(string name, const Values& values) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".dat"); + size_t nrSO3 = values.count(); + myfile << "#Type SO3 Number " << nrSO3 << "\n"; + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << i; + for (int m = 0; m < 3; ++m) { + for (int n = 0; n < 3; ++n) { + myfile << " " << R(m, n); + } + } + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.dat file" << endl; +} + +void saveG2oResult(string 
name, const Values& values, std::map poses) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".g2o"); + size_t nrSO3 = values.count(); + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << "VERTEX_SE3:QUAT" << " "; + myfile << i << " "; + myfile << poses[i].x() << " " << poses[i].y() << " " << poses[i].z() << " "; + Vector quaternion = Rot3(R).quaternion(); + myfile << quaternion(3) << " " << quaternion(2) << " " << quaternion(1) + << " " << quaternion(0); + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.g2o file" << endl; +} + +void saveResultQuat(const Values& values) { + ofstream myfile; + myfile.open("shonan_result.dat"); + size_t nrSOn = values.count(); + for (int i = 0; i < nrSOn; ++i) { + GTSAM_PRINT(values.at(i)); + Rot3 R = Rot3(values.at(i).matrix()); + float x = R.toQuaternion().x(); + float y = R.toQuaternion().y(); + float z = R.toQuaternion().z(); + float w = R.toQuaternion().w(); + myfile << "QuatSO3 " << i; + myfile << "QuatSO3 " << i << " " << w << " " << x << " " << y << " " << z << "\n"; + myfile.close(); + } +} + +int main(int argc, char* argv[]) { + // primitive argument parsing: + if (argc > 3) { + throw runtime_error("Usage: timeShonanAveraging [g2oFile]"); + } + + string g2oFile; + try { + if (argc > 1) + g2oFile = argv[argc - 1]; + else + g2oFile = findExampleDataFile("sphere2500"); + + } catch (const exception& e) { + cerr << e.what() << '\n'; + exit(1); + } + + // Create a csv output file + size_t pos1 = g2oFile.find("data/"); + size_t pos2 = g2oFile.find(".g2o"); + string name = g2oFile.substr(pos1 + 5, pos2 - pos1 - 5); + cout << name << endl; + ofstream csvFile("shonan_timing_of_" + name + ".csv"); + + // Create Shonan averaging instance from the file. 
+ // ShonanAveragingParameters parameters; + // double sigmaNoiseInRadians = 0 * M_PI / 180; + // parameters.setNoiseSigma(sigmaNoiseInRadians); + static const ShonanAveraging3 kShonan(g2oFile); + + // increase p value and try optimize using Shonan Algorithm. use chrono for + // timing + size_t pMin = 3; + Values Qstar; + Vector minEigenVector; + double CostP = 0, Cost3 = 0, lambdaMin = 0, suBound = 0; + cout << "(int)p" << "\t" << "time1" << "\t" << "costP" << "\t" << "cost3" << "\t" + << "time2" << "\t" << "MinEigenvalue" << "\t" << "SuBound" << endl; + + const Values randomRotations = kShonan.initializeRandomly(); + + for (size_t p = pMin; p <= 7; p++) { + // Randomly initialize at lowest level, initialize by line search after that + const Values initial = + (p > pMin) ? kShonan.initializeWithDescent(p, Qstar, minEigenVector, + lambdaMin) + : ShonanAveraging3::LiftTo(pMin, randomRotations); + chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); + // optimizing + const Values result = kShonan.tryOptimizingAt(p, initial); + chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); + chrono::duration timeUsed1 = + chrono::duration_cast>(t2 - t1); + lambdaMin = kShonan.computeMinEigenValue(result, &minEigenVector); + chrono::steady_clock::time_point t3 = chrono::steady_clock::now(); + chrono::duration timeUsed2 = + chrono::duration_cast>(t3 - t1); + Qstar = result; + CostP = kShonan.costAt(p, result); + const Values SO3Values = kShonan.roundSolution(result); + Cost3 = kShonan.cost(SO3Values); + suBound = (Cost3 - CostP) / CostP; + + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &cout); + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &csvFile); + } + saveResult(name, kShonan.roundSolution(Qstar)); + // saveG2oResult(name, kShonan.roundSolution(Qstar), kShonan.Poses()); + return 0; +} diff --git a/matlab/+gtsam/Point2.m b/matlab/+gtsam/Point2.m new file mode 100644 
index 000000000..3ea65c33e --- /dev/null +++ b/matlab/+gtsam/Point2.m @@ -0,0 +1,12 @@ +function pt = Point2(varargin) + % Point2 shim + if nargin == 2 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2}]'; + elseif nargin == 1 + pt = varargin{1}; + elseif nargin == 0 + pt = [0 0]'; + else + error('Arguments do not match any overload of Point2 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/Point3.m b/matlab/+gtsam/Point3.m new file mode 100644 index 000000000..5f66b4517 --- /dev/null +++ b/matlab/+gtsam/Point3.m @@ -0,0 +1,12 @@ +function pt = Point3(varargin) + % Point3 shim + if nargin == 3 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2} varargin{3}]'; + elseif nargin == 1 + pt = varargin{1}; + elseif nargin == 0 + pt = [0 0 0]'; + else + error('Arguments do not match any overload of Point3 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/cylinderSampleProjection.m b/matlab/+gtsam/cylinderSampleProjection.m index 2b913b52d..697a57faa 100644 --- a/matlab/+gtsam/cylinderSampleProjection.m +++ b/matlab/+gtsam/cylinderSampleProjection.m @@ -50,9 +50,9 @@ for i = 1:cylinderNum visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = pose.translation().between(sampledPoint3); + rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid); + rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3); % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampleProjectionStereo.m b/matlab/+gtsam/cylinderSampleProjectionStereo.m index 10409ac6d..58b4140fd 100644 --- a/matlab/+gtsam/cylinderSampleProjectionStereo.m +++ b/matlab/+gtsam/cylinderSampleProjectionStereo.m @@ -25,20 +25,20 @@ for i 
= 1:cylinderNum % For Cheirality Exception sampledPoint3 = cylinders{i}.Points{j}; sampledPoint3local = pose.transformTo(sampledPoint3); - if sampledPoint3local.z < 0 + if sampledPoint3local(3) < 0 continue; end % measurements - Z.du = K.fx() * K.baseline() / sampledPoint3local.z; - Z.uL = K.fx() * sampledPoint3local.x / sampledPoint3local.z + K.px(); + Z.du = K.fx() * K.baseline() / sampledPoint3local(3); + Z.uL = K.fx() * sampledPoint3local(1) / sampledPoint3local(3) + K.px(); Z.uR = Z.uL + Z.du; - Z.v = K.fy() / sampledPoint3local.z + K.py(); + Z.v = K.fy() / sampledPoint3local(3) + K.py(); % ignore points not visible in the scene - if Z.uL < 0 || Z.uL >= imageSize.x || ... - Z.uR < 0 || Z.uR >= imageSize.x || ... - Z.v < 0 || Z.v >= imageSize.y + if Z.uL < 0 || Z.uL >= imageSize(1) || ... + Z.uR < 0 || Z.uR >= imageSize(1) || ... + Z.v < 0 || Z.v >= imageSize(2) continue; end @@ -54,9 +54,9 @@ for i = 1:cylinderNum visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = sampledPoint3 - pose.translation(); + rayCameraToCylinder = cylinders{k}.centroid - pose.translation(); + rayCylinderToPoint = sampledPoint3 - cylinders{k}.centroid; % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampling.m b/matlab/+gtsam/cylinderSampling.m index dcaa9c721..dc76295fa 100644 --- a/matlab/+gtsam/cylinderSampling.m +++ b/matlab/+gtsam/cylinderSampling.m @@ -12,8 +12,8 @@ function [cylinder] = cylinderSampling(baseCentroid, radius, height, density) % sample the points for i = 1:pointsNum theta = 2 * pi * rand; - x = radius * cos(theta) + baseCentroid.x; - y = radius * sin(theta) + baseCentroid.y; + x = radius * cos(theta) + baseCentroid(1); + y = radius * sin(theta) + baseCentroid(2); 
z = height * rand; points3{i,1} = Point3([x,y,z]'); end @@ -22,5 +22,5 @@ function [cylinder] = cylinderSampling(baseCentroid, radius, height, density) cylinder.radius = radius; cylinder.height = height; cylinder.Points = points3; - cylinder.centroid = Point3(baseCentroid.x, baseCentroid.y, height/2); + cylinder.centroid = Point3(baseCentroid(1), baseCentroid(2), height/2); end \ No newline at end of file diff --git a/matlab/+gtsam/plotCamera.m b/matlab/+gtsam/plotCamera.m index d0d1f45b7..986cd9a68 100644 --- a/matlab/+gtsam/plotCamera.m +++ b/matlab/+gtsam/plotCamera.m @@ -1,7 +1,7 @@ function plotCamera(pose, axisLength) hold on - C = pose.translation().vector(); + C = pose.translation(); R = pose.rotation().matrix(); xAxis = C+R(:,1)*axisLength; diff --git a/matlab/+gtsam/plotFlyingResults.m b/matlab/+gtsam/plotFlyingResults.m index 5d4a287c4..202f2409b 100644 --- a/matlab/+gtsam/plotFlyingResults.m +++ b/matlab/+gtsam/plotFlyingResults.m @@ -13,7 +13,7 @@ set(gcf, 'Position', [80,1,1800,1000]); %% plot all the cylinders and sampled points axis equal -axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); +axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); xlabel('X (m)'); ylabel('Y (m)'); zlabel('Height (m)'); @@ -50,8 +50,8 @@ for i = 1:cylinderNum [X,Y,Z] = cylinder(cylinders{i}.radius, sampleDensity * cylinders{i}.radius * cylinders{i}.height); - X = X + cylinders{i}.centroid.x; - Y = Y + cylinders{i}.centroid.y; + X = X + cylinders{i}.centroid(1); + Y = Y + cylinders{i}.centroid(2); Z = Z * cylinders{i}.height; h_cylinder{i} = surf(X,Y,Z); @@ -76,7 +76,7 @@ for i = 1:posesSize %plotCamera(poses{i}, 3); gRp = poses{i}.rotation().matrix(); % rotation from pose to global - C = poses{i}.translation().vector(); + C = poses{i}.translation(); axisLength = 3; xAxis = C+gRp(:,1)*axisLength; @@ -111,14 +111,14 @@ for i = 1:posesSize for j = 1:pointSize if ~isempty(pts3d{j}.cov{i}) hold on - h_point{j} = 
plot3(pts3d{j}.data.x, pts3d{j}.data.y, pts3d{j}.data.z); - h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data.x; pts3d{j}.data.y; pts3d{j}.data.z], ... + h_point{j} = plot3(pts3d{j}.data(1), pts3d{j}.data(2), pts3d{j}.data(3)); + h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data(1); pts3d{j}.data(2); pts3d{j}.data(3)], ... pts3d{j}.cov{i}, options.plot.covarianceScale); end end axis equal - axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); + axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); drawnow @@ -158,7 +158,7 @@ for i = 1 : posesSize hold on campos([poses{i}.x, poses{i}.y, poses{i}.z]); - camtarget([options.fieldSize.x/2, options.fieldSize.y/2, 0]); + camtarget([options.fieldSize(1)/2, options.fieldSize(2)/2, 0]); camlight(hlight, 'headlight'); if options.writeVideo diff --git a/matlab/+gtsam/plotPoint2.m b/matlab/+gtsam/plotPoint2.m index cd066951d..8d10ecab6 100644 --- a/matlab/+gtsam/plotPoint2.m +++ b/matlab/+gtsam/plotPoint2.m @@ -1,10 +1,10 @@ function plotPoint2(p,color,P) % plotPoint2 shows a Point2, possibly with covariance matrix if size(color,2)==1 - plot(p.x,p.y,[color '*']); + plot(p(1),p(2),[color '*']); else - plot(p.x,p.y,color); + plot(p(1),p(2),color); end if exist('P', 'var') && (~isempty(P)) - gtsam.covarianceEllipse([p.x;p.y],P,color(1)); + gtsam.covarianceEllipse([p(1);p(2)],P,color(1)); end \ No newline at end of file diff --git a/matlab/+gtsam/plotPoint3.m b/matlab/+gtsam/plotPoint3.m index 390b44939..85c84a210 100644 --- a/matlab/+gtsam/plotPoint3.m +++ b/matlab/+gtsam/plotPoint3.m @@ -1,12 +1,12 @@ function plotPoint3(p, color, P) %PLOTPOINT3 Plot a Point3 with an optional covariance matrix if size(color,2)==1 - plot3(p.x,p.y,p.z,[color '*']); + plot3(p(1),p(2),p(3),[color '*']); else - plot3(p.x,p.y,p.z,color); + plot3(p(1),p(2),p(3),color); end if exist('P', 'var') - gtsam.covarianceEllipse3D([p.x;p.y;p.z],P); + 
gtsam.covarianceEllipse3D([p(1);p(2);p(3)],P); end end diff --git a/matlab/+gtsam/plotPose3.m b/matlab/+gtsam/plotPose3.m index 8c3c7dd76..258e3f776 100644 --- a/matlab/+gtsam/plotPose3.m +++ b/matlab/+gtsam/plotPose3.m @@ -4,7 +4,7 @@ if nargin<3,axisLength=0.1;end % get rotation and translation (center) gRp = pose.rotation().matrix(); % rotation from pose to global -C = pose.translation().vector(); +C = pose.translation(); if ~isempty(axisLength) % draw the camera axes diff --git a/matlab/+gtsam/points2DTrackStereo.m b/matlab/+gtsam/points2DTrackStereo.m index 60c9f1295..04cddb7f7 100644 --- a/matlab/+gtsam/points2DTrackStereo.m +++ b/matlab/+gtsam/points2DTrackStereo.m @@ -38,7 +38,7 @@ graph.add(PriorFactorPose3(symbol('x', 1), cameraPoses{1}, posePriorNoise)); %% initialize graph and values initialEstimate = Values; for i = 1:pointsNum - point_j = points3d{i}.data.retract(0.05*randn(3,1)); + point_j = points3d{i}.data + (0.05*randn(3,1)); initialEstimate.insert(symbol('p', i), point_j); end diff --git a/matlab/gtsam_examples/CameraFlyingExample.m b/matlab/gtsam_examples/CameraFlyingExample.m index add2bc75a..a0dfef22a 100644 --- a/matlab/gtsam_examples/CameraFlyingExample.m +++ b/matlab/gtsam_examples/CameraFlyingExample.m @@ -46,7 +46,7 @@ options.camera.horizon = 60; % camera baseline options.camera.baseline = 0.05; % camera focal length -options.camera.f = round(options.camera.resolution.x * options.camera.horizon / ... +options.camera.f = round(options.camera.resolution(1) * options.camera.horizon / ... options.camera.fov); % camera focal baseline options.camera.fB = options.camera.f * options.camera.baseline; @@ -54,15 +54,15 @@ options.camera.fB = options.camera.f * options.camera.baseline; options.camera.disparity = options.camera.fB / options.camera.horizon; % Monocular Camera Calibration options.camera.monoK = Cal3_S2(options.camera.f, options.camera.f, 0, ... 
- options.camera.resolution.x/2, options.camera.resolution.y/2); + options.camera.resolution(1)/2, options.camera.resolution(2)/2); % Stereo Camera Calibration options.camera.stereoK = Cal3_S2Stereo(options.camera.f, options.camera.f, 0, ... - options.camera.resolution.x/2, options.camera.resolution.y/2, options.camera.disparity); + options.camera.resolution(1)/2, options.camera.resolution(2)/2, options.camera.disparity); % write video output options.writeVideo = true; % the testing field size (unit: meter) -options.fieldSize = Point2([100, 100]'); +options.fieldSize = Point2(100, 100); % camera flying speed (unit: meter / second) options.speed = 20; % camera flying height @@ -113,14 +113,14 @@ theta = 0; i = 1; while i <= cylinderNum theta = theta + 2*pi/10; - x = 40 * rand * cos(theta) + options.fieldSize.x/2; - y = 20 * rand * sin(theta) + options.fieldSize.y/2; - baseCentroid{i} = Point2([x, y]'); + x = 40 * rand * cos(theta) + options.fieldSize(1)/2; + y = 20 * rand * sin(theta) + options.fieldSize(2)/2; + baseCentroid{i} = Point2(x, y); % prevent two cylinders interact with each other regenerate = false; for j = 1:i-1 - if i > 1 && baseCentroid{i}.dist(baseCentroid{j}) < options.cylinder.radius * 2 + if i > 1 && norm(baseCentroid{i} - baseCentroid{j}) < options.cylinder.radius * 2 regenerate = true; break; end @@ -146,17 +146,17 @@ while 1 angle = 0.1*pi*(rand-0.5); inc_t = Point3(distance * cos(angle), ... distance * sin(angle), 0); - t = t.compose(inc_t); + t = t + inc_t; - if t.x > options.fieldSize.x - 10 || t.y > options.fieldSize.y - 10; + if t(1) > options.fieldSize(1) - 10 || t(2) > options.fieldSize(2) - 10; break; end %t = Point3([(i-1)*(options.fieldSize.x - 10)/options.poseNum + 10, ... % 15, 10]'); camera = PinholeCameraCal3_S2.Lookat(t, ... - Point3(options.fieldSize.x/2, options.fieldSize.y/2, 0), ... - Point3([0,0,1]'), options.camera.monoK); + Point3(options.fieldSize(1)/2, options.fieldSize(2)/2, 0), ... 
+ Point3(0,0,1), options.camera.monoK); cameraPoses{end+1} = camera.pose; end diff --git a/matlab/gtsam_examples/MonocularVOExample.m b/matlab/gtsam_examples/MonocularVOExample.m index 11c4253de..0d09a1487 100644 --- a/matlab/gtsam_examples/MonocularVOExample.m +++ b/matlab/gtsam_examples/MonocularVOExample.m @@ -15,14 +15,14 @@ import gtsam.* %% Create two cameras and corresponding essential matrix E aRb = Rot3.Yaw(pi/2); -aTb = Point3 (0.1, 0, 0); +aTb = [.1, 0, 0]'; identity=Pose3; aPb = Pose3(aRb, aTb); cameraA = CalibratedCamera(identity); cameraB = CalibratedCamera(aPb); %% Create 5 points -P = { Point3(0, 0, 1), Point3(-0.1, 0, 1), Point3(0.1, 0, 1), Point3(0, 0.5, 0.5), Point3(0, -0.5, 0.5) }; +P = { [0, 0, 1]', [-0.1, 0, 1]', [0.1, 0, 1]', [0, 0.5, 0.5]', [0, -0.5, 0.5]' }; %% Project points in both cameras for i=1:5 diff --git a/matlab/gtsam_examples/PlanarSLAMExample.m b/matlab/gtsam_examples/PlanarSLAMExample.m index aec933d74..3febc23e6 100644 --- a/matlab/gtsam_examples/PlanarSLAMExample.m +++ b/matlab/gtsam_examples/PlanarSLAMExample.m @@ -71,9 +71,12 @@ marginals = Marginals(graph, result); plot2DTrajectory(result, [], marginals); plot2DPoints(result, 'b', marginals); -plot([result.atPose2(i1).x; result.atPoint2(j1).x],[result.atPose2(i1).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i2).x; result.atPoint2(j1).x],[result.atPose2(i2).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i3).x; result.atPoint2(j2).x],[result.atPose2(i3).y; result.atPoint2(j2).y], 'c-'); +p_j1 = result.atPoint2(j1); +p_j2 = result.atPoint2(j2); + +plot([result.atPose2(i1).x; p_j1(1)],[result.atPose2(i1).y; p_j1(2)], 'c-'); +plot([result.atPose2(i2).x; p_j1(1)],[result.atPose2(i2).y; p_j1(2)], 'c-'); +plot([result.atPose2(i3).x; p_j2(1)],[result.atPose2(i3).y; p_j2(2)], 'c-'); axis([-0.6 4.8 -1 1]) axis equal view(2) diff --git a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m index 375ed645c..93979a68a 
100644 --- a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m +++ b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m @@ -60,15 +60,18 @@ for j=1:2 S{j}=chol(Q{j}); % for sampling end -plot([sample.atPose2(i1).x; sample.atPoint2(j1).x],[sample.atPose2(i1).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i2).x; sample.atPoint2(j1).x],[sample.atPose2(i2).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i3).x; sample.atPoint2(j2).x],[sample.atPose2(i3).y; sample.atPoint2(j2).y], 'c-'); +p_j1 = sample.atPoint2(j1); +p_j2 = sample.atPoint2(j2); + +plot([sample.atPose2(i1).x; p_j1(1)],[sample.atPose2(i1).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i2).x; p_j1(1)],[sample.atPose2(i2).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i3).x; p_j2(1)],[sample.atPose2(i3).y; p_j2(2)], 'c-'); view(2); axis auto; axis equal %% Do Sampling on point 2 N=1000; for s=1:N delta = S{2}*randn(2,1); - proposedPoint = Point2(point{2}.x+delta(1),point{2}.y+delta(2)); + proposedPoint = Point2(point{2} + delta); plotPoint2(proposedPoint,'k.') end \ No newline at end of file diff --git a/matlab/gtsam_examples/Pose2SLAMwSPCG.m b/matlab/gtsam_examples/Pose2SLAMwSPCG.m index 67f22fe1d..2df7efe2f 100644 --- a/matlab/gtsam_examples/Pose2SLAMwSPCG.m +++ b/matlab/gtsam_examples/Pose2SLAMwSPCG.m @@ -54,7 +54,7 @@ initialEstimate.print(sprintf('\nInitial estimate:\n')); %% Optimize using Levenberg-Marquardt optimization with SubgraphSolver params = gtsam.LevenbergMarquardtParams; subgraphParams = gtsam.SubgraphSolverParameters; -params.setLinearSolverType('CONJUGATE_GRADIENT'); +params.setLinearSolverType('ITERATIVE'); params.setIterativeParams(subgraphParams); optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate); result = optimizer.optimize(); diff --git a/matlab/gtsam_examples/SBAExample.m b/matlab/gtsam_examples/SBAExample.m index 584ace09a..f94a2ee4e 100644 --- a/matlab/gtsam_examples/SBAExample.m +++ b/matlab/gtsam_examples/SBAExample.m @@ -47,7 +47,7 @@ end %% Add 
Gaussian priors for a pose and a landmark to constrain the system cameraPriorNoise = noiseModel.Diagonal.Sigmas(cameraNoiseSigmas); firstCamera = PinholeCameraCal3_S2(truth.cameras{1}.pose, truth.K); -graph.add(PriorFactorSimpleCamera(symbol('c',1), firstCamera, cameraPriorNoise)); +graph.add(PriorFactorPinholeCameraCal3_S2(symbol('c',1), firstCamera, cameraPriorNoise)); pointPriorNoise = noiseModel.Isotropic.Sigma(3,pointNoiseSigma); graph.add(PriorFactorPoint3(symbol('p',1), truth.points{1}, pointPriorNoise)); @@ -64,7 +64,7 @@ for i=1:size(truth.cameras,2) initialEstimate.insert(symbol('c',i), camera_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/SFMExample.m b/matlab/gtsam_examples/SFMExample.m index 6700e90d2..a57da929c 100644 --- a/matlab/gtsam_examples/SFMExample.m +++ b/matlab/gtsam_examples/SFMExample.m @@ -58,7 +58,7 @@ for i=1:size(truth.cameras,2) initialEstimate.insert(symbol('x',i), pose_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/StereoVOExample.m b/matlab/gtsam_examples/StereoVOExample.m index b437ca80c..e7345fcf2 100644 --- a/matlab/gtsam_examples/StereoVOExample.m +++ b/matlab/gtsam_examples/StereoVOExample.m @@ -51,10 +51,10 @@ graph.add(GenericStereoFactor3D(StereoPoint2(320, 270, 115), stereo_model, x2, l initialEstimate = Values; initialEstimate.insert(x1, first_pose); % noisy estimate for pose 2 -initialEstimate.insert(x2, Pose3(Rot3(), Point3(0.1,-.1,1.1))); -initialEstimate.insert(l1, Point3( 1, 1, 5)); -initialEstimate.insert(l2, Point3(-1, 
1, 5)); -initialEstimate.insert(l3, Point3( 0,-.5, 5)); +initialEstimate.insert(x2, Pose3(Rot3(), [0.1, -.1, 1.1]')); +initialEstimate.insert(l1, [ 1, 1, 5]'); +initialEstimate.insert(l2, [-1, 1, 5]'); +initialEstimate.insert(l3, [ 0,-.5, 5]'); %% optimize fprintf(1,'Optimizing\n'); tic diff --git a/matlab/gtsam_examples/VisualizeMEstimators.m b/matlab/gtsam_examples/VisualizeMEstimators.m index 8a0485334..ce505df5d 100644 --- a/matlab/gtsam_examples/VisualizeMEstimators.m +++ b/matlab/gtsam_examples/VisualizeMEstimators.m @@ -48,7 +48,7 @@ function plot_m_estimator(x, model, plot_title, fig_id, filename) rho = zeros(size(x)); for i = 1:size(x, 2) w(i) = model.weight(x(i)); - rho(i) = model.residual(x(i)); + rho(i) = model.loss(x(i)); end psi = w .* x; diff --git a/matlab/gtsam_tests/testPlanarSLAMExample.m b/matlab/gtsam_tests/testPlanarSLAMExample.m index d3cab5d1f..e0b4dbfc8 100644 --- a/matlab/gtsam_tests/testPlanarSLAMExample.m +++ b/matlab/gtsam_tests/testPlanarSLAMExample.m @@ -66,4 +66,4 @@ CHECK('pose_1.equals(Pose2,1e-4)',pose_1.equals(Pose2,1e-4)); point_1 = result.atPoint2(symbol('l',1)); marginals.marginalCovariance(symbol('l',1)); -CHECK('point_1.equals(Point2(2,2),1e-4)',point_1.equals(Point2(2,2),1e-4)); +CHECK('point_1.equals(Point2(2,2),1e-4)',norm(point_1 - Point2(2,2)) < 1e-4); diff --git a/matlab/gtsam_tests/testSFMExample.m b/matlab/gtsam_tests/testSFMExample.m index 985cbdb2c..a1f63c3a7 100644 --- a/matlab/gtsam_tests/testSFMExample.m +++ b/matlab/gtsam_tests/testSFMExample.m @@ -69,5 +69,5 @@ end for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('p',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/gtsam_tests/testStereoVOExample.m b/matlab/gtsam_tests/testStereoVOExample.m index 218d0ace1..c721244ba 100644 --- a/matlab/gtsam_tests/testStereoVOExample.m +++ 
b/matlab/gtsam_tests/testStereoVOExample.m @@ -65,4 +65,4 @@ pose_x1 = result.atPose3(x1); CHECK('pose_x1.equals(first_pose,1e-4)',pose_x1.equals(first_pose,1e-4)); point_l1 = result.atPoint3(l1); -CHECK('point_1.equals(expected_l1,1e-4)',point_l1.equals(expected_l1,1e-4)); \ No newline at end of file +CHECK('point_1.equals(expected_l1,1e-4)',norm(point_l1 - expected_l1) < 1e-4); \ No newline at end of file diff --git a/matlab/gtsam_tests/testValues.m b/matlab/gtsam_tests/testValues.m index fe2cd30fe..48bc83f2c 100644 --- a/matlab/gtsam_tests/testValues.m +++ b/matlab/gtsam_tests/testValues.m @@ -5,8 +5,8 @@ values = Values; E = EssentialMatrix(Rot3,Unit3); tol = 1e-9; -values.insert(0, Point2); -values.insert(1, Point3); +values.insert(0, Point2(0, 0)); +values.insert(1, Point3(0, 0, 0)); values.insert(2, Rot2); values.insert(3, Pose2); values.insert(4, Rot3); @@ -21,8 +21,8 @@ values.insert(10, imuBias.ConstantBias); values.insert(11, [1;2;3]); values.insert(12, [1 2;3 4]); -EXPECT('at',values.atPoint2(0).equals(Point2,tol)); -EXPECT('at',values.atPoint3(1).equals(Point3,tol)); +EXPECT('at',values.atPoint2(0) == Point2(0, 0)); +EXPECT('at',values.atPoint3(1) == Point3(0, 0, 0)); EXPECT('at',values.atRot2(2).equals(Rot2,tol)); EXPECT('at',values.atPose2(3).equals(Pose2,tol)); EXPECT('at',values.atRot3(4).equals(Rot3,tol)); diff --git a/matlab/gtsam_tests/testVisualISAMExample.m b/matlab/gtsam_tests/testVisualISAMExample.m index 223e823a6..f75942ea7 100644 --- a/matlab/gtsam_tests/testVisualISAMExample.m +++ b/matlab/gtsam_tests/testVisualISAMExample.m @@ -51,5 +51,5 @@ end for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('l',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison.m b/matlab/unstable_examples/+imuSimulator/IMUComparison.m index 68e20bb25..871f023ef 
100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison.m +++ b/matlab/unstable_examples/+imuSimulator/IMUComparison.m @@ -28,19 +28,19 @@ currentVelocityGlobalIMUbody = currentVelocityGlobal; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -120,9 +120,9 @@ for t = times currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m index c589bea32..450697de0 100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m +++ 
b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m @@ -28,7 +28,7 @@ currentVelocityGlobal = velocity; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; @@ -112,7 +112,7 @@ for t = times end %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m index c86e40a21..0d8abad2c 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m @@ -7,7 +7,7 @@ measuredOmega = omega1Body; % Acceleration measurement (in this simple toy example no other forces % act on the body and the only acceleration is the centripetal Coriolis acceleration) -measuredAcc = Point3(cross(omega1Body, velocity1Body)).vector; +measuredAcc = Point3(cross(omega1Body, velocity1Body)); acc_omega = [ measuredAcc; measuredOmega ]; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m index 534b9365e..5ed1fc516 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m @@ -6,16 +6,16 @@ import gtsam.*; % Calculate gyro measured rotation rate by transforming body rotation rate % into the IMU frame. 
-measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)).vector; +measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)); % Transform both velocities into IMU frame, accounting for the velocity % induced by rigid body rotation on a lever arm (Coriolis effect). velocity1inertial = imuFrame.rotation.unrotate( ... - Point3(velocity1Body + cross(omega1Body, imuFrame.translation.vector))).vector; + Point3(velocity1Body + cross(omega1Body, imuFrame.translation))); imu2in1 = Rot3.Expmap(measuredOmega * deltaT); velocity2inertial = imu2in1.rotate(imuFrame.rotation.unrotate( ... - Point3(velocity2Body + cross(omega2Body, imuFrame.translation.vector)))).vector; + Point3(velocity2Body + cross(omega2Body, imuFrame.translation)))); % Acceleration in IMU frame measuredAcc = (velocity2inertial - velocity1inertial) / deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/coriolisExample.m b/matlab/unstable_examples/+imuSimulator/coriolisExample.m index 35d27aa73..ee4deb433 100644 --- a/matlab/unstable_examples/+imuSimulator/coriolisExample.m +++ b/matlab/unstable_examples/+imuSimulator/coriolisExample.m @@ -190,13 +190,13 @@ for i = 1:length(times) newFactors.add(PriorFactorConstantBias(currentBiasKey, zeroBias, sigma_init_b)); % Store data - positionsInFixedGT(:,1) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,1) = currentPoseFixedGT.translation; velocityInFixedGT(:,1) = currentVelocityFixedGT; - positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation.vector; - %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity.vector; - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation.vector; + positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation; + %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity; + positionsEstimates(:,i) = currentPoseEstimate.translation; + 
velocitiesEstimates(:,i) = currentVelocityEstimate; + currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(1).R = currentRotatingFrame.rotation.matrix; else @@ -204,18 +204,18 @@ for i = 1:length(times) % Update the position and velocity % x_t = x_0 + v_0*dt + 1/2*a_0*dt^2 % v_t = v_0 + a_0*dt - currentPositionFixedGT = Point3(currentPoseFixedGT.translation.vector ... + currentPositionFixedGT = Point3(currentPoseFixedGT.translation ... + currentVelocityFixedGT * deltaT + 0.5 * accelFixed * deltaT * deltaT); currentVelocityFixedGT = currentVelocityFixedGT + accelFixed * deltaT; currentPoseFixedGT = Pose3(Rot3, currentPositionFixedGT); % constant orientation % Rotate pose in fixed frame to get pose in rotating frame - previousPositionRotatingGT = currentPoseRotatingGT.translation.vector; + previousPositionRotatingGT = currentPoseRotatingGT.translation; currentRotatingFrame = currentRotatingFrame.compose(changePoseRotatingFrame); inverseCurrentRotatingFrame = (currentRotatingFrame.inverse); currentPoseRotatingGT = inverseCurrentRotatingFrame.compose(currentPoseFixedGT); - currentPositionRotatingGT = currentPoseRotatingGT.translation.vector; + currentPositionRotatingGT = currentPoseRotatingGT.translation; % Get velocity in rotating frame by treating it like a position and using compose % Maybe Luca knows a better way to do this within GTSAM. 
@@ -230,11 +230,11 @@ for i = 1:length(times) % - 0.5 * accelFixed * deltaT * deltaT) / deltaT + accelFixed * deltaT; % Store GT (ground truth) poses - positionsInFixedGT(:,i) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,i) = currentPoseFixedGT.translation; velocityInFixedGT(:,i) = currentVelocityFixedGT; - positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation.vector; + positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation; velocityInRotatingGT(:,i) = currentVelocityRotatingGT; - currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation.vector; + currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(i).R = currentRotatingFrame.rotation.matrix; %% Estimate trajectory in rotating frame using GTSAM (ground truth measurements) @@ -303,9 +303,9 @@ for i = 1:length(times) currentVelocityEstimate = isam.calculateEstimate(currentVelKey); currentBias = isam.calculateEstimate(currentBiasKey); - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - biasEstimates(:,i) = currentBias.vector; + positionsEstimates(:,i) = currentPoseEstimate.translation; + velocitiesEstimates(:,i) = currentVelocityEstimate; + biasEstimates(:,i) = currentBias; % In matrix form: R_error = R_gt'*R_estimate % Perform Logmap on the rotation matrix to get a vector diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m index 2eddf75ee..64ba36d3b 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m @@ -151,14 +151,14 @@ hold on; if options.includeCameraFactors b = [-1000 2000 -2000 2000 -30 30]; for i = 1:size(metadata.camera.gtLandmarkPoints,2) - p = metadata.camera.gtLandmarkPoints(i).vector; + p = metadata.camera.gtLandmarkPoints(i); if(p(1) > b(1) && 
p(1) < b(2) && p(2) > b(3) && p(2) < b(4) && p(3) > b(5) && p(3) < b(6)) plot3(p(1), p(2), p(3), 'k+'); end end pointsToPlot = metadata.camera.gtLandmarkPoints(find(projectionFactorSeenBy > 0)); for i = 1:length(pointsToPlot) - p = pointsToPlot(i).vector; + p = pointsToPlot(i); plot3(p(1), p(2), p(3), 'gs', 'MarkerSize', 10); end end @@ -233,9 +233,9 @@ for k=1:numMonteCarloRuns for i=0:options.trajectoryLength % compute estimation errors currentPoseKey = symbol('x', i); - gtPosition = gtValues.at(currentPoseKey).translation.vector; - estPosition = estimate.at(currentPoseKey).translation.vector; - estR = estimate.at(currentPoseKey).rotation.matrix; + gtPosition = gtValues.atPose3(currentPoseKey).translation; + estPosition = estimate.atPose3(currentPoseKey).translation; + estR = estimate.atPose3(currentPoseKey).rotation.matrix; errPosition = estPosition - gtPosition; % compute covariances: diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m index 00ae4b9c2..07f146dcb 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m @@ -14,7 +14,7 @@ graph = NonlinearFactorGraph; for i=0:length(measurements) % Get the current pose currentPoseKey = symbol('x', i); - currentPose = values.at(currentPoseKey); + currentPose = values.atPose3(currentPoseKey); if i==0 %% first time step, add priors @@ -26,11 +26,11 @@ for i=0:length(measurements) % IMU velocity and bias priors if options.includeIMUFactors == 1 currentVelKey = symbol('v', 0); - currentVel = values.at(currentVelKey).vector; + currentVel = values.atPoint3(currentVelKey); graph.add(PriorFactorLieVector(currentVelKey, LieVector(currentVel), noiseModels.noiseVel)); currentBiasKey = symbol('b', 0); - currentBias = values.at(currentBiasKey); + currentBias = values.atPoint3(currentBiasKey); 
graph.add(PriorFactorConstantBias(currentBiasKey, currentBias, noiseModels.noisePriorBias)); end @@ -155,7 +155,7 @@ for i=0:length(measurements) if options.includeCameraFactors == 1 for j = 1:length(measurements(i).landmarks) cameraMeasurmentNoise = measurementNoise.cameraNoiseVector .* randn(2,1); - cameraPixelMeasurement = measurements(i).landmarks(j).vector; + cameraPixelMeasurement = measurements(i).landmarks(j); % Only add the measurement if it is in the image frame (based on calibration) if(cameraPixelMeasurement(1) > 0 && cameraPixelMeasurement(2) > 0 ... && cameraPixelMeasurement(1) < 2*metadata.camera.calibration.px ... diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m index 195b7ff69..3d8a9b5d2 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m @@ -40,7 +40,7 @@ if options.useRealData == 1 %% gt Between measurements if options.includeBetweenFactors == 1 && i > 0 - prevPose = values.at(currentPoseKey - 1); + prevPose = values.atPose3(currentPoseKey - 1); deltaPose = prevPose.between(currentPose); measurements(i).deltaVector = Pose3.Logmap(deltaPose); end @@ -65,7 +65,7 @@ if options.useRealData == 1 IMUdeltaPose = IMUPose1.between(IMUPose2); IMUdeltaPoseVector = Pose3.Logmap(IMUdeltaPose); IMUdeltaRotVector = IMUdeltaPoseVector(1:3); - IMUdeltaPositionVector = IMUPose2.translation.vector - IMUPose1.translation.vector; % translation in absolute frame + IMUdeltaPositionVector = IMUPose2.translation - IMUPose1.translation; % translation in absolute frame measurements(i).imu(j).deltaT = deltaT; @@ -88,7 +88,7 @@ if options.useRealData == 1 %% gt GPS measurements if options.includeGPSFactors == 1 && i > 0 - gpsPositionVector = imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation.vector; + gpsPositionVector = 
imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation; measurements(i).gpsPositionVector = gpsPositionVector; end diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m index 3f72f1215..2de1e1103 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m @@ -5,9 +5,9 @@ function [ finalPose, finalVelocityGlobal ] = integrateIMUTrajectory( ... import gtsam.*; imu2in1 = Rot3.Expmap(acc_omegaIMU(4:6) * deltaT); -accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))).vector; +accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))); -finalPosition = Point3(initialPoseGlobal.translation.vector ... +finalPosition = Point3(initialPoseGlobal.translation ... + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m index 50b223060..bec2d760d 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m @@ -3,7 +3,7 @@ function [ finalPose, finalVelocityGlobal ] = integrateIMUTrajectory_bodyFrame( % Before integrating in the body frame we need to compensate for the Coriolis % effect -acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)).vector; +acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)); % after compensating for coriolis this will be essentially zero % since we are moving at constant body velocity @@ -16,8 +16,8 @@ finalPositionBody = velocity1Body * deltaT + 0.5 * acc_body * 
deltaT * deltaT; finalVelocityBody = velocity1Body + acc_body * deltaT; %% Express the integrated quantities in the global frame -finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)).vector() ); -finalPosition = initialPoseGlobal.translation().vector() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)).vector() ; +finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)) ); +finalPosition = initialPoseGlobal.translation() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)) ; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); % Include position and rotation in a pose finalPose = Pose3(finalRotation, Point3(finalPosition) ); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m index b919520ac..ea851315f 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m @@ -9,8 +9,8 @@ finalRotation = initialPoseGlobal.rotation.compose(imu2in1); intermediateRotation = initialPoseGlobal.rotation.compose( Rot3.Expmap(acc_omegaIMU(4:6) * deltaT/2 )); % Integrate positions (equation (1) in Lupton) -accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))).vector; -finalPosition = Point3(initialPoseGlobal.translation.vector ... +accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))); +finalPosition = Point3(initialPoseGlobal.translation ... 
+ initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m index e51b622b0..a704342ae 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m @@ -6,16 +6,16 @@ import gtsam.*; % Rotation: R^1_2 body2in1 = Rot3.Expmap(omega1Body * deltaT); % Velocity 2 in frame 1: v^1_2 = R^1_2 v^2_2 -velocity2inertial = body2in1.rotate(Point3(velocity2Body)).vector; +velocity2inertial = body2in1.rotate(Point3(velocity2Body)); % Acceleration: a^1 = (v^1_2 - v^1_1)/dt accelBody1 = (velocity2inertial - velocity1Body) / deltaT; % Velocity 1 in frame W: v^W_1 = R^W_1 v^1_1 -initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)).vector; +initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)); % Acceleration in frame W: a^W = R^W_1 a^1 -accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)).vector; +accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)); -finalPosition = Point3(initialPose.translation.vector + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); +finalPosition = Point3(initialPose.translation + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPose.rotation.compose(body2in1); finalPose = Pose3(finalRotation, finalPosition); diff --git a/matlab/unstable_examples/+imuSimulator/test1onestep.m b/matlab/unstable_examples/+imuSimulator/test1onestep.m index 883569849..cb66d23d6 100644 --- a/matlab/unstable_examples/+imuSimulator/test1onestep.m +++ b/matlab/unstable_examples/+imuSimulator/test1onestep.m @@ -10,7 +10,7 @@ omega = [0;0;0.1]; velocity = [1;0;0]; R = Rot3.Expmap(omega * deltaT); 
-velocity2body = R.unrotate(Point3(velocity)).vector; +velocity2body = R.unrotate(Point3(velocity)); acc_omegaExpected = [-0.01; 0; 0; 0; 0; 0.1]; acc_omegaActual = imuSimulator.calculateIMUMeasurement(omega, omega, velocity, velocity2body, deltaT, Pose3(Rot3, Point3([1;0;0]))); disp('IMU measurement discrepancy:'); @@ -40,7 +40,7 @@ disp(acc_omegaActual - acc_omegaExpected); initialPose = Pose3; initialVelocity = velocity; finalPoseExpected = Pose3.Expmap([ omega; velocity ] * deltaT); -finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)).vector; +finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)); [ finalPoseActual, finalVelocityActual ] = imuSimulator.integrateTrajectory(initialPose, omega, velocity, velocity, deltaT); disp('Final pose discrepancy:'); disp(finalPoseExpected.between(finalPoseActual).matrix); diff --git a/matlab/unstable_examples/+imuSimulator/test2constglobal.m b/matlab/unstable_examples/+imuSimulator/test2constglobal.m index 19956d08a..6ab35d50b 100644 --- a/matlab/unstable_examples/+imuSimulator/test2constglobal.m +++ b/matlab/unstable_examples/+imuSimulator/test2constglobal.m @@ -21,12 +21,12 @@ positions = zeros(3, length(times)+1); i = 2; for t = times - velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)).vector; + velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)); R = Rot3.Expmap(omega * deltaT); - velocity2body = currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)).vector; + velocity2body = currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)); [ currentPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory(currentPoseGlobal, omega, velocity1body, velocity2body, deltaT); - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/test3constbody.m 
b/matlab/unstable_examples/+imuSimulator/test3constbody.m index b3ee2edfc..8ee14ab78 100644 --- a/matlab/unstable_examples/+imuSimulator/test3constbody.m +++ b/matlab/unstable_examples/+imuSimulator/test3constbody.m @@ -26,27 +26,27 @@ currentPoseGlobal = Pose3; currentVelocityGlobal = velocity; % Initial state (IMU) currentPoseGlobalIMU = Pose3; %currentPoseGlobal.compose(IMUinBody); -%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)).vector; % no Coriolis here? +%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)); % no Coriolis here? currentVelocityGlobalIMU = IMUinBody.rotation.unrotate( ... - Point3(velocity + cross(omega, IMUinBody.translation.vector))).vector; + Point3(velocity + cross(omega, IMUinBody.translation))); % Positions % body trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Expected IMU trajectory (from body trajectory and lever arm) positionsIMUe = zeros(3, length(times)+1); -positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMUe(1).p = positionsIMUe(:,1); posesIMUe(1).R = poses(1).R * IMUinBody.rotation.matrix; % Integrated IMU trajectory (from IMU measurements) positionsIMU = zeros(3, length(times)+1); -positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMU(1).p = positionsIMU(:,1); posesIMU(1).R = IMUinBody.compose(currentPoseGlobalIMU).rotation.matrix; @@ -62,9 +62,9 @@ for t = times currentPoseGlobalIMU, currentVelocityGlobalIMU, acc_omega, deltaT); % Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUe(:,i) = 
currentPoseGlobal.translation.vector + currentPoseGlobal.rotation.matrix * IMUinBody.translation.vector; - positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUe(:,i) = currentPoseGlobal.translation + currentPoseGlobal.rotation.matrix * IMUinBody.translation; + positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation; poses(i).p = positions(:,i); posesIMUe(i).p = positionsIMUe(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/test4circle.m b/matlab/unstable_examples/+imuSimulator/test4circle.m index 22ee175dd..ab2c546db 100644 --- a/matlab/unstable_examples/+imuSimulator/test4circle.m +++ b/matlab/unstable_examples/+imuSimulator/test4circle.m @@ -34,19 +34,19 @@ currentVelocityGlobalIMUbody = currentVelocityGlobal; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -72,9 +72,9 @@ for t = times currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - 
positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/FlightCameraTransformIMU.m b/matlab/unstable_examples/FlightCameraTransformIMU.m index 9a8a27344..d2f2bc34d 100644 --- a/matlab/unstable_examples/FlightCameraTransformIMU.m +++ b/matlab/unstable_examples/FlightCameraTransformIMU.m @@ -120,7 +120,7 @@ for i=1:size(trajectory)-1 end % current ground-truth position indicator - h_cursor = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'*'); + h_cursor = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'*'); camera_pose = pose.compose(camera_transform); @@ -198,7 +198,7 @@ for i=1:size(trajectory)-1 if ~result.exists(lKey) p = landmarks.atPoint3(lKey); n = normrnd(0,landmark_noise,3,1); - noisy_landmark = Point3(p.x()+n(1),p.y()+n(2),p.z()+n(3)); + noisy_landmark = p + n; initial.insert(lKey, noisy_landmark); % and add a prior since its position is known @@ -245,32 +245,33 @@ for i=1:size(trajectory)-1 initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = result.at(currentVelKey); - currentBias = result.at(currentBiasKey); + currentVelocityGlobal = result.atPoint3(currentVelKey); + currentBias = result.atConstantBias(currentBiasKey); %% plot current pose result - isam_pose = result.at(xKey); + isam_pose = result.atPose3(xKey); pose_t = isam_pose.translation(); if exist('h_result','var') delete(h_result); end - h_result = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'^b', 'MarkerSize', 10); + h_result = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'^b', 'MarkerSize', 10); title(a1, sprintf('Step %d', i)); if exist('h_text1(1)', 'var') delete(h_text1(1)); % delete(h_text2(1)); end - ty = 
result.at(transformKey).translation().y(); - K_estimate = result.at(calibrationKey); + t = result.atPose3(transformKey).translation(); + ty = t(2); + K_estimate = result.atCal3_S2(calibrationKey); K_errors = K.localCoordinates(K_estimate); - camera_transform_estimate = result.at(transformKey); + camera_transform_estimate = result.atPose3(transformKey); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); % h_text1 = text(-600,0,0,sprintf('Y-Transform(0.0): %0.2f',ty)); text(0,1300,0,sprintf('Calibration and IMU-cam transform errors:')); @@ -304,7 +305,7 @@ for i=1:size(trajectory)-1 end %% print out final camera transform and write video -result.at(transformKey); +result.atPose3(transformKey); if(write_video) close(videoObj); end \ No newline at end of file diff --git a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m index 4557d711f..9796a9737 100644 --- a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m +++ b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m @@ -53,7 +53,7 @@ y_shift = Point3(0,0.5,0); % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); @@ -134,7 +134,7 @@ for i=1:steps end if i == 2 fg.add(PriorFactorPose3(2, Pose3(Rot3(),Point3(1,0,0)),pose_cov)); - fg.add(PriorFactorLieVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); + fg.add(PriorFactorVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); fg.add(PriorFactorConstantBias(currentBiasKey, currentBias, sigma_init_b)); end if i > 1 @@ -144,7 +144,7 @@ for i=1:steps step = move_circle; end - initial.insert(i,result.at(i-1).compose(step)); + initial.insert(i,result.atPose3(i-1).compose(step)); fg.add(BetweenFactorPose3(i-1,i, 
step, covariance)); deltaT = 1; @@ -158,10 +158,13 @@ for i=1:steps [ currentIMUPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory( ... currentIMUPoseGlobal, omega, velocity, velocity, deltaT); - - currentSummarizedMeasurement = gtsam.ImuFactorPreintegratedMeasurements( ... - currentBias, IMU_metadata.AccelerometerSigma.^2 * eye(3), ... - IMU_metadata.GyroscopeSigma.^2 * eye(3), IMU_metadata.IntegrationSigma.^2 * eye(3)); + + params = gtsam.PreintegrationParams.MakeSharedD(9.81); + params.setAccelerometerCovariance(IMU_metadata.AccelerometerSigma.^2 * eye(3)); + params.setGyroscopeCovariance(IMU_metadata.GyroscopeSigma.^2 * eye(3)); + params.setIntegrationCovariance(IMU_metadata.IntegrationSigma.^2 * eye(3)); + currentSummarizedMeasurement = gtsam.PreintegratedImuMeasurements( ... + params, currentBias); accMeas = acc_omega(1:3)-g; omegaMeas = acc_omega(4:6); @@ -171,7 +174,7 @@ for i=1:steps fg.add(ImuFactor( ... i-1, currentVelKey-1, ... i, currentVelKey, ... - currentBiasKey, currentSummarizedMeasurement, g, w_coriolis)); + currentBiasKey, currentSummarizedMeasurement)); % Bias evolution as given in the IMU metadata fg.add(BetweenFactorConstantBias(currentBiasKey-1, currentBiasKey, imuBias.ConstantBias(zeros(3,1), zeros(3,1)), ... 
@@ -204,8 +207,8 @@ for i=1:steps initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = isam.calculateEstimate(currentVelKey); - currentBias = isam.calculateEstimate(currentBiasKey); + currentVelocityGlobal = isam.calculateEstimate().atVector(currentVelKey); + currentBias = isam.calculateEstimate().atConstantBias(currentBiasKey); %% Compute some marginals marginal = isam.marginalCovariance(calibrationKey); @@ -249,10 +252,10 @@ for i=1:steps gtsam.plotPose3(currentIMUPoseGlobal, [], 2); %% plot results - result_camera_transform = result.at(transformKey); + result_camera_transform = result.atPose3(transformKey); for j=1:i - gtsam.plotPose3(result.at(j),[],0.5); - gtsam.plotPose3(result.at(j).compose(result_camera_transform),[],0.5); + gtsam.plotPose3(result.atPose3(j),[],0.5); + gtsam.plotPose3(result.atPose3(j).compose(result_camera_transform),[],0.5); end xlabel('x (m)'); @@ -265,14 +268,15 @@ for i=1:steps % axis equal for l=101:100+nrPoints - plotPoint3(result.at(l),'g'); + plotPoint3(result.atPoint3(l),'g'); end - ty = result.at(transformKey).translation().y(); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); - px = result.at(calibrationKey).px(); - py = result.at(calibrationKey).py(); + t = result.atPose3(transformKey).translation(); + ty = t(2); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); + px = result.atCal3_S2(calibrationKey).px(); + py = result.atCal3_S2(calibrationKey).py(); text(1,5,5,sprintf('Y-Transform(0.0): %0.2f',ty)); text(1,5,3,sprintf('fx(900): %.0f',fx)); text(1,5,1,sprintf('fy(900): %.0f',fy)); @@ -342,10 +346,10 @@ end fprintf('Cheirality Exception count: %d\n', cheirality_exception_count); disp('Transform after optimization'); -result.at(transformKey) +result.atPose3(transformKey) disp('Calibration after optimization'); -result.at(calibrationKey) +result.atCal3_S2(calibrationKey) disp('Bias after optimization'); currentBias diff --git 
a/matlab/unstable_examples/TransformProjectionFactorExample.m b/matlab/unstable_examples/TransformProjectionFactorExample.m index 79a96209d..f6a896cff 100644 --- a/matlab/unstable_examples/TransformProjectionFactorExample.m +++ b/matlab/unstable_examples/TransformProjectionFactorExample.m @@ -33,7 +33,7 @@ y_shift = Point3(0,1,0); % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); diff --git a/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m b/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m index ca5b70c62..88f6cc63c 100644 --- a/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m +++ b/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m @@ -47,7 +47,7 @@ y_shift = Point3(0,1,0); % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); @@ -146,7 +146,8 @@ for i=1:20 plotPoint3(result.atPoint3(l),'g'); end - ty = result.atPose3(1000).translation().y(); + t = result.atPose3(1000).translation(); + ty = t(2); text(5,5,5,sprintf('Y-Transform: %0.2g',ty)); if(write_video) diff --git a/matlab/unstable_examples/plot_projected_landmarks.m b/matlab/unstable_examples/plot_projected_landmarks.m index 6b8101844..30e222016 100644 --- a/matlab/unstable_examples/plot_projected_landmarks.m +++ b/matlab/unstable_examples/plot_projected_landmarks.m @@ -25,9 +25,9 @@ for i = 0:measurement_keys.size-1 key_index = gtsam.symbolIndex(key); p = landmarks.atPoint3(gtsam.symbol('l',key_index)); - x(i+1) = p.x; - y(i+1) = p.y; - z(i+1) = p.z; + x(i+1) = p(1); + y(i+1) = p(2); + z(i+1) = p(3); end diff --git a/matlab/unstable_examples/project_landmarks.m b/matlab/unstable_examples/project_landmarks.m index aaccc9248..3bccef94b 100644 --- a/matlab/unstable_examples/project_landmarks.m +++ b/matlab/unstable_examples/project_landmarks.m 
@@ -11,9 +11,9 @@ function [ measurements ] = project_landmarks( pose, landmarks, K ) z = camera.project(landmarks.atPoint3(symbol('l',i))); % check bounding box - if z.x < 0 || z.x > 1280 + if z(1) < 0 || z(1) > 1280 continue - elseif z.y < 0 || z.y > 960 + elseif z(2) < 0 || z(2) > 960 continue end diff --git a/matlab/unstable_examples/testTSAMFactors.m b/matlab/unstable_examples/testTSAMFactors.m index 5cfd0aa80..2d1de5855 100644 --- a/matlab/unstable_examples/testTSAMFactors.m +++ b/matlab/unstable_examples/testTSAMFactors.m @@ -50,8 +50,8 @@ result = optimizer.optimize(); % Check result CHECK('error',result.atPose2(100).equals(b1,1e-5)) CHECK('error',result.atPose2(10).equals(origin,1e-5)) -CHECK('error',result.atPoint2(1).equals(Point2(0,1),1e-5)) -CHECK('error',result.atPoint2(2).equals(Point2(0,1),1e-5)) +CHECK('error',result.atPoint2(1) - Point2(0,1) < 1e-5) +CHECK('error',result.atPoint2(2) - Point2(0,1) < 1e-5) CHECK('error',result.atPose2(20).equals(origin,1e-5)) CHECK('error',result.atPose2(200).equals(b2,1e-5)) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt new file mode 100644 index 000000000..23898f61d --- /dev/null +++ b/python/CMakeLists.txt @@ -0,0 +1,108 @@ +set(GTSAM_PYTHON_BUILD_DIRECTORY ${PROJECT_BINARY_DIR}/python) + +if (NOT GTSAM_BUILD_PYTHON) + return() +endif() + +# Generate setup.py. 
+file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS) +configure_file(${PROJECT_SOURCE_DIR}/python/setup.py.in + ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py) + +set(WRAP_USE_CUSTOM_PYTHON_LIBRARY ${GTSAM_USE_CUSTOM_PYTHON_LIBRARY}) +set(WRAP_PYTHON_VERSION ${GTSAM_PYTHON_VERSION}) + +include(PybindWrap) + +add_custom_target(gtsam_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam/gtsam.i") +add_custom_target(gtsam_unstable_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i") + +# ignoring the non-concrete types (type aliases) +set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::BetweenFactorPose2s + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector) + +pybind_wrap(gtsam_py # target + ${PROJECT_SOURCE_DIR}/gtsam/gtsam.i # interface_header + "gtsam.cpp" # generated_cpp + "gtsam" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam/gtsam.tpl + gtsam # libs + "gtsam;gtsam_header" # dependencies + ON # use_boost + ) + +set_target_properties(gtsam_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + +set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam) + +# Symlink all tests .py files to build folder. 
+create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam" + "${GTSAM_MODULE_PATH}") + +if(GTSAM_UNSTABLE_BUILD_PYTHON) +set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector + gtsam::FixedLagSmootherKeyTimestampMapValue) +pybind_wrap(gtsam_unstable_py # target + ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header + "gtsam_unstable.cpp" # generated_cpp + "gtsam_unstable" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam_unstable/gtsam_unstable.tpl + gtsam_unstable # libs + "gtsam_unstable;gtsam_unstable_header" # dependencies + ON # use_boost + ) + +set_target_properties(gtsam_unstable_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam_unstable" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + +set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable) + +# Symlink all tests .py files to build folder. +create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable" + "${GTSAM_UNSTABLE_MODULE_PATH}") +endif() + +set(GTSAM_PYTHON_INSTALL_TARGET python-install) +add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} + COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py install + DEPENDS gtsam_py gtsam_unstable_py + WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) diff --git a/python/README.md b/python/README.md new file mode 100644 index 000000000..b1a3a865f --- /dev/null +++ b/python/README.md @@ -0,0 +1,65 @@ +# README + +# Python Wrapper + +This is the Python wrapper around the GTSAM C++ library. 
We use our custom [wrap library](https://github.com/borglab/wrap) to generate the bindings to the underlying C++ code. + +## Requirements + +- If you want to build the GTSAM python library for a specific python version (eg 3.6), + use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used. +- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment), + then the environment should be active while building GTSAM. +- This wrapper needs `pyparsing(>=2.4.2)`, and `numpy(>=1.11.0)`. These can be installed as follows: + + ```bash + pip install -r /python/requirements.txt + ``` + +## Install + +- Run cmake with the `GTSAM_BUILD_PYTHON` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory `/python`. + +- Build GTSAM and the wrapper with `make` (or `ninja` if you use `-GNinja`). + +- To install, simply run `make python-install` (`ninja python-install`). + - The same command can be used to install into a virtual environment if it is active. + - **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory. + +- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly. + +## Unit Tests + +The Python toolbox also has a small set of unit tests located in the +test directory. To run them: + + ```bash + cd /python/gtsam/tests + python -m unittest discover + ``` + +## Utils + +TODO + +## Examples + +TODO + +## Writing Your Own Scripts + +See the tests for examples. + +### Some Important Notes: + +- Vector/Matrix: + + - GTSAM expects double-precision floating point vectors and matrices. + Hence, you should pass numpy matrices with `dtype=float`, or `float64`, to avoid any conversion needed. 
+ - Also, GTSAM expects _column-major_ matrices, unlike the default storage + scheme in numpy. But this is only performance-related as `pybind11` should translate them when needed. However, this will result a copy if your matrix is not in the expected type + and storage order. + +## Wrapping Custom GTSAM-based Project + +Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python). diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py new file mode 100644 index 000000000..b2d1fb7e7 --- /dev/null +++ b/python/gtsam/__init__.py @@ -0,0 +1,33 @@ +from .gtsam import * + + +def _init(): + """This function is to add shims for the long-gone Point2 and Point3 types""" + + import numpy as np + + global Point2 # export function + + def Point2(x=np.nan, y=np.nan): + """Shim for the deleted Point2 type.""" + if isinstance(x, np.ndarray): + assert x.shape == (2,), "Point2 takes 2-vector" + return x # "copy constructor" + return np.array([x, y], dtype=float) + + global Point3 # export function + + def Point3(x=np.nan, y=np.nan, z=np.nan): + """Shim for the deleted Point3 type.""" + if isinstance(x, np.ndarray): + assert x.shape == (3,), "Point3 takes 3-vector" + return x # "copy constructor" + return np.array([x, y, z], dtype=float) + + # for interactive debugging + if __name__ == "__main__": + # we want all definitions accessible + globals().update(locals()) + + +_init() diff --git a/cython/gtsam/examples/DogLegOptimizerExample.py b/python/gtsam/examples/DogLegOptimizerExample.py similarity index 95% rename from cython/gtsam/examples/DogLegOptimizerExample.py rename to python/gtsam/examples/DogLegOptimizerExample.py index 776ceedc4..26f4fef84 100644 --- a/cython/gtsam/examples/DogLegOptimizerExample.py +++ b/python/gtsam/examples/DogLegOptimizerExample.py @@ -35,17 +35,17 @@ def run(args): graph = gtsam.NonlinearFactorGraph() # Priors - prior = gtsam.noiseModel_Isotropic.Sigma(3, 1) + prior = 
gtsam.noiseModel.Isotropic.Sigma(3, 1) graph.add(gtsam.PriorFactorPose2(11, T11, prior)) graph.add(gtsam.PriorFactorPose2(21, T21, prior)) # Odometry - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model)) graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model)) # Range - model_rho = gtsam.noiseModel_Isotropic.Sigma(1, 0.01) + model_rho = gtsam.noiseModel.Isotropic.Sigma(1, 0.01) graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho)) params = gtsam.DoglegParams() diff --git a/cython/gtsam/examples/GPSFactorExample.py b/python/gtsam/examples/GPSFactorExample.py similarity index 93% rename from cython/gtsam/examples/GPSFactorExample.py rename to python/gtsam/examples/GPSFactorExample.py index 493a07725..0bc0d1bf3 100644 --- a/cython/gtsam/examples/GPSFactorExample.py +++ b/python/gtsam/examples/GPSFactorExample.py @@ -26,8 +26,8 @@ lon0 = -84.30626 h0 = 274 # Create noise models -GPS_NOISE = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) -PRIOR_NOISE = gtsam.noiseModel_Isotropic.Sigma(6, 0.25) +GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) +PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/python/gtsam/examples/ImuFactorExample.py b/python/gtsam/examples/ImuFactorExample.py new file mode 100644 index 000000000..eec7c5ebd --- /dev/null +++ b/python/gtsam/examples/ImuFactorExample.py @@ -0,0 +1,180 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +A script validating and demonstrating the ImuFactor inference. 
+ +Author: Frank Dellaert, Varun Agrawal +""" + +from __future__ import print_function + +import argparse +import math + +import gtsam +import matplotlib.pyplot as plt +import numpy as np +from gtsam.symbol_shorthand import B, V, X +from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D + +from PreintegrationExample import POSES_FIG, PreintegrationExample + +BIAS_KEY = B(0) + + +np.set_printoptions(precision=3, suppress=True) + + +class ImuFactorExample(PreintegrationExample): + + def __init__(self, twist_scenario="sick_twist"): + self.velocity = np.array([2, 0, 0]) + self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + + # Choose one of these twists to change scenario: + twist_scenarios = dict( + zero_twist=(np.zeros(3), np.zeros(3)), + forward_twist=(np.zeros(3), self.velocity), + loop_twist=(np.array([0, -math.radians(30), 0]), self.velocity), + sick_twist=(np.array([math.radians(30), -math.radians(30), 0]), + self.velocity) + ) + + accBias = np.array([-0.3, 0.1, 0.2]) + gyroBias = np.array([0.1, 0.3, -0.1]) + bias = gtsam.imuBias.ConstantBias(accBias, gyroBias) + + dt = 1e-2 + super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario], + bias, dt) + + def addPrior(self, i, graph): + state = self.scenario.navState(i) + graph.push_back(gtsam.PriorFactorPose3( + X(i), state.pose(), self.priorNoise)) + graph.push_back(gtsam.PriorFactorVector( + V(i), state.velocity(), self.velNoise)) + + def run(self, T=12, compute_covariances=False, verbose=True): + graph = gtsam.NonlinearFactorGraph() + + # initialize data structure for pre-integrated IMU measurements + pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) + + T = 12 + num_poses = T # assumes 1 factor per second + initial = gtsam.Values() + initial.insert(BIAS_KEY, self.actualBias) + + # simulate the loop + i = 0 # state index + initial_state_i = self.scenario.navState(0) + 
initial.insert(X(i), initial_state_i.pose()) + initial.insert(V(i), initial_state_i.velocity()) + + # add prior on beginning + self.addPrior(0, graph) + + for k, t in enumerate(np.arange(0, T, self.dt)): + # get measurements and add them to PIM + measuredOmega = self.runner.measuredAngularVelocity(t) + measuredAcc = self.runner.measuredSpecificForce(t) + pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) + + # Plot IMU many times + if k % 10 == 0: + self.plotImu(t, measuredOmega, measuredAcc) + + if (k+1) % int(1 / self.dt) == 0: + # Plot every second + self.plotGroundTruthPose(t, scale=1) + plt.title("Ground Truth Trajectory") + + # create IMU factor every second + factor = gtsam.ImuFactor(X(i), V(i), + X(i + 1), V(i + 1), + BIAS_KEY, pim) + graph.push_back(factor) + + if verbose: + print(factor) + print(pim.predict(actual_state_i, self.actualBias)) + + pim.resetIntegration() + + rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3)*0.1) + translationNoise = gtsam.Point3(*np.random.randn(3)*1) + poseNoise = gtsam.Pose3(rotationNoise, translationNoise) + + actual_state_i = self.scenario.navState(t + self.dt) + print("Actual state at {0}:\n{1}".format( + t+self.dt, actual_state_i)) + + noisy_state_i = gtsam.NavState( + actual_state_i.pose().compose(poseNoise), + actual_state_i.velocity() + np.random.randn(3)*0.1) + + initial.insert(X(i+1), noisy_state_i.pose()) + initial.insert(V(i+1), noisy_state_i.velocity()) + i += 1 + + # add priors on end + # self.addPrior(num_poses - 1, graph) + + initial.print_("Initial values:") + + # optimize using Levenberg-Marquardt optimization + params = gtsam.LevenbergMarquardtParams() + params.setVerbosityLM("SUMMARY") + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) + result = optimizer.optimize() + + result.print_("Optimized values:") + + if compute_covariances: + # Calculate and print marginal covariances + marginals = gtsam.Marginals(graph, result) + print("Covariance on bias:\n", + 
marginals.marginalCovariance(BIAS_KEY)) + for i in range(num_poses): + print("Covariance on pose {}:\n{}\n".format( + i, marginals.marginalCovariance(X(i)))) + print("Covariance on vel {}:\n{}\n".format( + i, marginals.marginalCovariance(V(i)))) + + # Plot resulting poses + i = 0 + while result.exists(X(i)): + pose_i = result.atPose3(X(i)) + plot_pose3(POSES_FIG+1, pose_i, 1) + i += 1 + plt.title("Estimated Trajectory") + + gtsam.utils.plot.set_axes_equal(POSES_FIG+1) + + print("Bias Values", result.atConstantBias(BIAS_KEY)) + + plt.ioff() + plt.show() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser("ImuFactorExample.py") + parser.add_argument("--twist_scenario", + default="sick_twist", + choices=("zero_twist", "forward_twist", "loop_twist", "sick_twist")) + parser.add_argument("--time", "-T", default=12, + type=int, help="Total time in seconds") + parser.add_argument("--compute_covariances", + default=False, action='store_true') + parser.add_argument("--verbose", default=False, action='store_true') + args = parser.parse_args() + + ImuFactorExample(args.twist_scenario).run( + args.time, args.compute_covariances, args.verbose) diff --git a/cython/gtsam/examples/ImuFactorExample2.py b/python/gtsam/examples/ImuFactorISAM2Example.py similarity index 61% rename from cython/gtsam/examples/ImuFactorExample2.py rename to python/gtsam/examples/ImuFactorISAM2Example.py index 0d98f08fe..98f6218ea 100644 --- a/cython/gtsam/examples/ImuFactorExample2.py +++ b/python/gtsam/examples/ImuFactorISAM2Example.py @@ -1,6 +1,6 @@ """ iSAM2 example with ImuFactor. 
-Author: Robert Truax (C++), Frank Dellaert (Python) +Author: Robert Truax (C++), Frank Dellaert, Varun Agrawal """ # pylint: disable=invalid-name, E1101 @@ -9,7 +9,6 @@ from __future__ import print_function import math import gtsam -import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, @@ -17,9 +16,8 @@ from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, PinholeCameraCal3_S2, Point3, Pose3, PriorFactorConstantBias, PriorFactorPose3, PriorFactorVector, Rot3, Values) -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import B, V, X +from gtsam.utils import plot from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 @@ -28,46 +26,45 @@ def vector3(x, y, z): return np.array([x, y, z], dtype=np.float) -def ISAM2_plot(values, fignum=0): - """Plot poses.""" - fig = plt.figure(fignum) - axes = fig.gca(projection='3d') - plt.cla() - - i = 0 - min_bounds = 0, 0, 0 - max_bounds = 0, 0, 0 - while values.exists(X(i)): - pose_i = values.atPose3(X(i)) - gtsam_plot.plot_pose3(fignum, pose_i, 10) - position = pose_i.translation().vector() - min_bounds = [min(v1, v2) for (v1, v2) in zip(position, min_bounds)] - max_bounds = [max(v1, v2) for (v1, v2) in zip(position, max_bounds)] - # max_bounds = min(pose_i.x(), max_bounds[0]), 0, 0 - i += 1 - - # draw - axes.set_xlim3d(min_bounds[0]-1, max_bounds[0]+1) - axes.set_ylim3d(min_bounds[1]-1, max_bounds[1]+1) - axes.set_zlim3d(min_bounds[2]-1, max_bounds[2]+1) - plt.pause(1) - - -# IMU preintegration parameters -# Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis g = 9.81 n_gravity = vector3(0, 0, -g) -PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) -I = np.eye(3) -PARAMS.setAccelerometerCovariance(I * 0.1) -PARAMS.setGyroscopeCovariance(I * 0.1) 
-PARAMS.setIntegrationCovariance(I * 0.1) -PARAMS.setUse2ndOrderCoriolis(False) -PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) -BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1) -DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), - Point3(0.05, -0.10, 0.20)) + +def preintegration_parameters(): + # IMU preintegration parameters + # Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis + PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) + I = np.eye(3) + PARAMS.setAccelerometerCovariance(I * 0.1) + PARAMS.setGyroscopeCovariance(I * 0.1) + PARAMS.setIntegrationCovariance(I * 0.1) + PARAMS.setUse2ndOrderCoriolis(False) + PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) + + BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1) + DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), + Point3(0.05, -0.10, 0.20)) + + return PARAMS, BIAS_COVARIANCE, DELTA + + +def get_camera(radius): + up = Point3(0, 0, 1) + target = Point3(0, 0, 0) + position = Point3(radius, 0, 0) + camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) + return camera + + +def get_scenario(radius, pose_0, angular_velocity, delta_t): + """Create the set of ground-truth landmarks and poses""" + + angular_velocity_vector = vector3(0, -angular_velocity, 0) + linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) + scenario = ConstantTwistScenario( + angular_velocity_vector, linear_velocity_vector, pose_0) + + return scenario def IMU_example(): @@ -75,23 +72,17 @@ def IMU_example(): # Start with a camera on x-axis looking at origin radius = 30 - up = Point3(0, 0, 1) - target = Point3(0, 0, 0) - position = Point3(radius, 0, 0) - camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) + camera = get_camera(radius) pose_0 = camera.pose() - # Create the set of ground-truth landmarks and poses - angular_velocity = math.radians(180) # rad/sec delta_t = 1.0/18 # makes for 10 degrees per step + angular_velocity = math.radians(180) # rad/sec + scenario = 
get_scenario(radius, pose_0, angular_velocity, delta_t) - angular_velocity_vector = vector3(0, -angular_velocity, 0) - linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) - scenario = ConstantTwistScenario( - angular_velocity_vector, linear_velocity_vector, pose_0) + PARAMS, BIAS_COVARIANCE, DELTA = preintegration_parameters() # Create a factor graph - newgraph = NonlinearFactorGraph() + graph = NonlinearFactorGraph() # Create (incremental) ISAM2 solver isam = ISAM2() @@ -102,23 +93,23 @@ def IMU_example(): # Add a prior on pose x0. This indirectly specifies where the origin is. # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw - noise = gtsam.noiseModel_Diagonal.Sigmas( + noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) - newgraph.push_back(PriorFactorPose3(X(0), pose_0, noise)) + graph.push_back(PriorFactorPose3(X(0), pose_0, noise)) # Add imu priors biasKey = B(0) - biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(), + biasnoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias.ConstantBias(), biasnoise) - newgraph.push_back(biasprior) - initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) - velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + graph.push_back(biasprior) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) + velnoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Calculate with correct initial velocity n_velocity = vector3(0, angular_velocity * radius, 0) velprior = PriorFactorVector(V(0), n_velocity, velnoise) - newgraph.push_back(velprior) + graph.push_back(velprior) initialEstimate.insert(V(0), n_velocity) accum = gtsam.PreintegratedImuMeasurements(PARAMS) @@ -139,9 +130,9 @@ def IMU_example(): if i % 5 == 0: biasKey += 1 factor = BetweenFactorConstantBias( - biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE) - newgraph.add(factor) - 
initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) + biasKey - 1, biasKey, gtsam.imuBias.ConstantBias(), BIAS_COVARIANCE) + graph.add(factor) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) # Predict acceleration and gyro measurements in (actual) body frame nRb = scenario.rotation(t).matrix() @@ -152,21 +143,24 @@ def IMU_example(): # Add Imu Factor imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) - newgraph.add(imufac) + graph.add(imufac) # insert new velocity, which is wrong initialEstimate.insert(V(i), n_velocity) accum.resetIntegration() # Incremental solution - isam.update(newgraph, initialEstimate) + isam.update(graph, initialEstimate) result = isam.calculateEstimate() - ISAM2_plot(result) + plot.plot_incremental_trajectory(0, result, + start=i, scale=3, time_interval=0.01) # reset - newgraph = NonlinearFactorGraph() + graph = NonlinearFactorGraph() initialEstimate.clear() + plt.show() + if __name__ == '__main__': IMU_example() diff --git a/cython/gtsam/examples/OdometryExample.py b/python/gtsam/examples/OdometryExample.py similarity index 94% rename from cython/gtsam/examples/OdometryExample.py rename to python/gtsam/examples/OdometryExample.py index e778e3f85..8b519ce9a 100644 --- a/cython/gtsam/examples/OdometryExample.py +++ b/python/gtsam/examples/OdometryExample.py @@ -21,8 +21,8 @@ import matplotlib.pyplot as plt import gtsam.utils.plot as gtsam_plot # Create noise models -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/PlanarManipulatorExample.py b/python/gtsam/examples/PlanarManipulatorExample.py similarity index 98% rename from 
cython/gtsam/examples/PlanarManipulatorExample.py rename to python/gtsam/examples/PlanarManipulatorExample.py index e42ae09d7..9af4f7fcc 100644 --- a/cython/gtsam/examples/PlanarManipulatorExample.py +++ b/python/gtsam/examples/PlanarManipulatorExample.py @@ -167,13 +167,11 @@ class ThreeLinkArm(object): axes = fig.gca() sXl1 = Pose2(0, 0, math.radians(90)) - t = sXl1.translation() - p1 = np.array([t.x(), t.y()]) + p1 = sXl1.translation() gtsam_plot.plot_pose2_on_axes(axes, sXl1) def plot_line(p, g, color): - t = g.translation() - q = np.array([t.x(), t.y()]) + q = g.translation() line = np.append(p[np.newaxis], q[np.newaxis], axis=0) axes.plot(line[:, 0], line[:, 1], color) return q diff --git a/cython/gtsam/examples/PlanarSLAMExample.py b/python/gtsam/examples/PlanarSLAMExample.py similarity index 91% rename from cython/gtsam/examples/PlanarSLAMExample.py rename to python/gtsam/examples/PlanarSLAMExample.py index c84f0f834..5ffdf048d 100644 --- a/cython/gtsam/examples/PlanarSLAMExample.py +++ b/python/gtsam/examples/PlanarSLAMExample.py @@ -13,15 +13,15 @@ Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) from __future__ import print_function -import gtsam import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X + +import gtsam +from gtsam.symbol_shorthand import X, L # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -MEASUREMENT_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.2])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/Pose2SLAMExample.py 
b/python/gtsam/examples/Pose2SLAMExample.py similarity index 96% rename from cython/gtsam/examples/Pose2SLAMExample.py rename to python/gtsam/examples/Pose2SLAMExample.py index 680f2209f..2569f0953 100644 --- a/cython/gtsam/examples/Pose2SLAMExample.py +++ b/python/gtsam/examples/Pose2SLAMExample.py @@ -27,10 +27,9 @@ def vector3(x, y, z): """Create 3d double numpy array.""" return np.array([x, y, z], dtype=np.float) - # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) # 1. Create a factor graph container and add factors to it graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/Pose2SLAMExample_g2o.py b/python/gtsam/examples/Pose2SLAMExample_g2o.py similarity index 96% rename from cython/gtsam/examples/Pose2SLAMExample_g2o.py rename to python/gtsam/examples/Pose2SLAMExample_g2o.py index 09114370d..b2ba9c5bc 100644 --- a/cython/gtsam/examples/Pose2SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose2SLAMExample_g2o.py @@ -53,7 +53,7 @@ graph, initial = gtsam.readG2o(g2oFile, is3D) assert args.kernel == "none", "Supplied kernel type is not yet implemented" # Add prior on the pose having index (key) = 0 -priorModel = gtsam.noiseModel_Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) +priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) params = gtsam.GaussNewtonParams() @@ -82,7 +82,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.utilities_extractPose2(result) + resultPoses = gtsam.utilities.extractPose2(result) for i in range(resultPoses.shape[0]): plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) plt.show() diff --git a/cython/gtsam/examples/Pose3SLAMExample_g2o.py 
b/python/gtsam/examples/Pose3SLAMExample_g2o.py similarity index 93% rename from cython/gtsam/examples/Pose3SLAMExample_g2o.py rename to python/gtsam/examples/Pose3SLAMExample_g2o.py index 3c1a54f7b..82b3bda98 100644 --- a/cython/gtsam/examples/Pose3SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose3SLAMExample_g2o.py @@ -39,11 +39,11 @@ is3D = True graph, initial = gtsam.readG2o(g2oFile, is3D) # Add Prior on the first key -priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, +priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4)) print("Adding prior to g2o file ") -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) params = gtsam.GaussNewtonParams() @@ -65,7 +65,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.utilities_allPose3s(result) + resultPoses = gtsam.utilities.allPose3s(result) for i in range(resultPoses.size()): plot.plot_pose3(1, resultPoses.atPose3(i)) plt.show() diff --git a/cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py similarity index 91% rename from cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py rename to python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py index 02c696905..2b2c5f991 100644 --- a/cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py +++ b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py @@ -24,9 +24,9 @@ is3D = True graph, initial = gtsam.readG2o(g2oFile, is3D) # Add prior on the first key. 
TODO: assumes first key ios z -priorModel = gtsam.noiseModel_Diagonal.Variances( +priorModel = gtsam.noiseModel.Diagonal.Variances( np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) # Initializing Pose3 - chordal relaxation" diff --git a/cython/gtsam/examples/PreintegrationExample.py b/python/gtsam/examples/PreintegrationExample.py similarity index 78% rename from cython/gtsam/examples/PreintegrationExample.py rename to python/gtsam/examples/PreintegrationExample.py index 76520b355..e308bc933 100644 --- a/cython/gtsam/examples/PreintegrationExample.py +++ b/python/gtsam/examples/PreintegrationExample.py @@ -10,12 +10,11 @@ A script validating the Preintegration of IMU measurements import math +import gtsam import matplotlib.pyplot as plt import numpy as np -from mpl_toolkits.mplot3d import Axes3D - -import gtsam from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D IMU_FIG = 1 POSES_FIG = 2 @@ -68,60 +67,62 @@ class PreintegrationExample(object): else: accBias = np.array([0, 0.1, 0]) gyroBias = np.array([0, 0, 0]) - self.actualBias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias) self.runner = gtsam.ScenarioRunner( self.scenario, self.params, self.dt, self.actualBias) + fig, self.axes = plt.subplots(4, 3) + fig.set_tight_layout(True) + def plotImu(self, t, measuredOmega, measuredAcc): plt.figure(IMU_FIG) # plot angular velocity omega_b = self.scenario.omega_b(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 1) - plt.scatter(t, omega_b[i], color='k', marker='.') - plt.scatter(t, measuredOmega[i], color=color, marker='.') - plt.xlabel('angular velocity ' + label) + ax = self.axes[0][i] + ax.scatter(t, omega_b[i], color='k', marker='.') + ax.scatter(t, measuredOmega[i], color=color, marker='.') + 
ax.set_xlabel('angular velocity ' + label) # plot acceleration in nav acceleration_n = self.scenario.acceleration_n(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 4) - plt.scatter(t, acceleration_n[i], color=color, marker='.') - plt.xlabel('acceleration in nav ' + label) + ax = self.axes[1][i] + ax.scatter(t, acceleration_n[i], color=color, marker='.') + ax.set_xlabel('acceleration in nav ' + label) # plot acceleration in body acceleration_b = self.scenario.acceleration_b(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 7) - plt.scatter(t, acceleration_b[i], color=color, marker='.') - plt.xlabel('acceleration in body ' + label) + ax = self.axes[2][i] + ax.scatter(t, acceleration_b[i], color=color, marker='.') + ax.set_xlabel('acceleration in body ' + label) # plot actual specific force, as well as corrupted actual = self.runner.actualSpecificForce(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 10) - plt.scatter(t, actual[i], color='k', marker='.') - plt.scatter(t, measuredAcc[i], color=color, marker='.') - plt.xlabel('specific force ' + label) + ax = self.axes[3][i] + ax.scatter(t, actual[i], color='k', marker='.') + ax.scatter(t, measuredAcc[i], color=color, marker='.') + ax.set_xlabel('specific force ' + label) - def plotGroundTruthPose(self, t): + def plotGroundTruthPose(self, t, scale=0.3, time_interval=0.01): # plot ground truth pose, as well as prediction from integrated IMU measurements actualPose = self.scenario.pose(t) plot_pose3(POSES_FIG, actualPose, 0.3) t = actualPose.translation() - self.maxDim = max([abs(t.x()), abs(t.y()), abs(t.z()), self.maxDim]) + self.maxDim = max([max(np.abs(t)), self.maxDim]) ax = plt.gca() ax.set_xlim3d(-self.maxDim, self.maxDim) ax.set_ylim3d(-self.maxDim, self.maxDim) ax.set_zlim3d(-self.maxDim, self.maxDim) - plt.pause(0.01) + plt.pause(time_interval) - def run(self): + def run(self, 
T=12): # simulate the loop - T = 12 for i, t in enumerate(np.arange(0, T, self.dt)): measuredOmega = self.runner.measuredAngularVelocity(t) measuredAcc = self.runner.measuredSpecificForce(t) diff --git a/cython/gtsam/examples/README.md b/python/gtsam/examples/README.md similarity index 84% rename from cython/gtsam/examples/README.md rename to python/gtsam/examples/README.md index 99bce00e2..e998e4dcd 100644 --- a/cython/gtsam/examples/README.md +++ b/python/gtsam/examples/README.md @@ -1,17 +1,12 @@ -These examples are almost identical to the old handwritten python wrapper -examples. However, there are just some slight name changes, for example -`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc... -Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthand_B(0)` or `B(0)` if we use python aliasing. - # Porting Progress | C++ Example Name | Ported | |-------------------------------------------------------|--------| | CameraResectioning | | | CreateSFMExampleData | | -| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through cython | -| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through cython | -| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through cython | +| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through Python | +| easyPoint2KalmanFilter | ExtendedKalmanFilter not yet exposed through Python | +| elaboratePoint2KalmanFilter | GaussianSequentialSolver not yet exposed through Python | | ImuFactorExample2 | X | | ImuFactorsExample | | | ISAM2Example_SmartFactor | | @@ -25,7 +20,7 @@ Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthan | Pose2SLAMExample_g2o | X | | Pose2SLAMExample_graph | | | Pose2SLAMExample_graphviz | | -| Pose2SLAMExample_lago | lago not exposed through cython | +| Pose2SLAMExample_lago | lago not yet exposed through Python | | Pose2SLAMStressTest | | | Pose2SLAMwSPCG | | | 
Pose3SLAMExample_changeKeys | | @@ -47,11 +42,11 @@ Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthan | StereoVOExample | | | StereoVOExample_large | | | TimeTBB | | -| UGM_chain | discrete functionality not exposed | -| UGM_small | discrete functionality not exposed | +| UGM_chain | discrete functionality not yet exposed | +| UGM_small | discrete functionality not yet exposed | | VisualISAM2Example | X | | VisualISAMExample | X | Extra Examples (with no C++ equivalent) - PlanarManipulatorExample -- SFMData +- SFMData \ No newline at end of file diff --git a/cython/gtsam/examples/SFMExample.py b/python/gtsam/examples/SFMExample.py similarity index 91% rename from cython/gtsam/examples/SFMExample.py rename to python/gtsam/examples/SFMExample.py index e02def2f9..f0c4c82ba 100644 --- a/cython/gtsam/examples/SFMExample.py +++ b/python/gtsam/examples/SFMExample.py @@ -13,14 +13,15 @@ from __future__ import print_function import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X +from gtsam import symbol_shorthand +L = symbol_shorthand.L +X = symbol_shorthand.X + from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, DoglegOptimizer, +from gtsam import (Cal3_S2, DoglegOptimizer, GenericProjectionFactorCal3_S2, Marginals, NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, - Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, - SimpleCamera, Values) + Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, Values) from gtsam.utils import plot @@ -56,7 +57,7 @@ def main(): K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + measurement_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -69,7 +70,7 @@ def main(): # Add a prior on 
pose x1. This indirectly specifies where the origin is. # 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) @@ -85,7 +86,7 @@ def main(): # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance # between the first camera and the first landmark. All other landmark positions are interpreted using this scale. - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) graph.print_('Factor Graph:\n') @@ -97,7 +98,7 @@ def main(): transformed_pose = pose.retract(0.1*np.random.randn(6,1)) initial_estimate.insert(X(i), transformed_pose) for j, point in enumerate(points): - transformed_point = Point3(point.vector() + 0.1*np.random.randn(3)) + transformed_point = point + 0.1*np.random.randn(3) initial_estimate.insert(L(j), transformed_point) initial_estimate.print_('Initial Estimates:\n') diff --git a/cython/gtsam/examples/SFMdata.py b/python/gtsam/examples/SFMdata.py similarity index 92% rename from cython/gtsam/examples/SFMdata.py rename to python/gtsam/examples/SFMdata.py index c586f7e52..6ac9c5726 100644 --- a/cython/gtsam/examples/SFMdata.py +++ b/python/gtsam/examples/SFMdata.py @@ -33,7 +33,8 @@ def createPoses(K): poses = [] for theta in angles: position = gtsam.Point3(radius*np.cos(theta), - radius*np.sin(theta), height) + radius*np.sin(theta), + height) camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up, K) poses.append(camera.pose()) return poses diff --git a/python/gtsam/examples/ShonanAveragingCLI.py 
b/python/gtsam/examples/ShonanAveragingCLI.py new file mode 100644 index 000000000..7f8839cc0 --- /dev/null +++ b/python/gtsam/examples/ShonanAveragingCLI.py @@ -0,0 +1,125 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Shonan Rotation Averaging CLI reads a *pose* graph, extracts the +rotation constraints, and runs the Shonan algorithm. + +Author: Frank Dellaert +Date: August 2020 +""" +# pylint: disable=invalid-name, E1101 + +import argparse + +import matplotlib.pyplot as plt +import numpy as np + +import gtsam +from gtsam.utils import plot + +def estimate_poses_given_rot(factors: gtsam.BetweenFactorPose3s, + rotations: gtsam.Values, + d: int = 3): + """ Estimate Poses from measurements, given rotations. From SfmProblem in shonan. + + Arguments: + factors -- data structure with many BetweenFactorPose3 factors + rotations {Values} -- Estimated rotations + + Returns: + Values -- Estimated Poses + """ + + I_d = np.eye(d) + + def R(j): + return rotations.atRot3(j) if d == 3 else rotations.atRot2(j) + + def pose(R, t): + return gtsam.Pose3(R, t) if d == 3 else gtsam.Pose2(R, t) + + graph = gtsam.GaussianFactorGraph() + model = gtsam.noiseModel.Unit.Create(d) + + # Add a factor anchoring t_0 + graph.add(0, I_d, np.zeros((d,)), model) + + # Add a factor saying t_j - t_i = Ri*t_ij for all edges (i,j) + for factor in factors: + keys = factor.keys() + i, j, Tij = keys[0], keys[1], factor.measured() + measured = R(i).rotate(Tij.translation()) + graph.add(j, I_d, i, -I_d, measured, model) + + # Solve linear system + translations = graph.optimize() + + # Convert to Values. 
+ result = gtsam.Values() + for j in range(rotations.size()): + tj = translations.at(j) + result.insert(j, pose(R(j), tj)) + + return result + + +def run(args): + """Run Shonan averaging and then recover translations linearly before saving result.""" + + # Get input file + if args.input_file: + input_file = args.input_file + else: + if args.named_dataset == "": + raise ValueError( + "You must either specify a named dataset or an input file") + input_file = gtsam.findExampleDataFile(args.named_dataset) + + if args.dimension == 2: + print("Running Shonan averaging for SO(2) on ", input_file) + shonan = gtsam.ShonanAveraging2(input_file) + if shonan.nrUnknowns() == 0: + raise ValueError("No 2D pose constraints found, try -d 3.") + initial = shonan.initializeRandomly() + rotations, _ = shonan.run(initial, 2, 10) + factors = gtsam.parse2DFactors(input_file) + elif args.dimension == 3: + print("Running Shonan averaging for SO(3) on ", input_file) + shonan = gtsam.ShonanAveraging3(input_file) + if shonan.nrUnknowns() == 0: + raise ValueError("No 3D pose constraints found, try -d 2.") + initial = shonan.initializeRandomly() + rotations, _ = shonan.run(initial, 3, 10) + factors = gtsam.parse3DFactors(input_file) + else: + raise ValueError("Can only run SO(2) or SO(3) averaging") + + print("Recovering translations") + poses = estimate_poses_given_rot(factors, rotations, args.dimension) + + print("Writing result to ", args.output_file) + gtsam.writeG2o(gtsam.NonlinearFactorGraph(), poses, args.output_file) + + plot.plot_trajectory(1, poses, scale=0.2) + plot.set_axes_equal(1) + plt.show() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-n', '--named_dataset', type=str, default="pose3example-grid", + help='Find and read frome example dataset file') + parser.add_argument('-i', '--input_file', type=str, default="", + help='Read pose constraints graph from the specified file') + parser.add_argument('-o', '--output_file', type=str, 
default="shonan.g2o", + help='Write solution to the specified file') + parser.add_argument('-d', '--dimension', type=int, default=3, + help='Optimize over 2D or 3D rotations') + parser.add_argument("-p", "--plot", action="store_true", default=True, + help="Plot result") + run(parser.parse_args()) diff --git a/cython/gtsam/examples/SimpleRotation.py b/python/gtsam/examples/SimpleRotation.py similarity index 96% rename from cython/gtsam/examples/SimpleRotation.py rename to python/gtsam/examples/SimpleRotation.py index 4e82d3778..0fef261f8 100644 --- a/cython/gtsam/examples/SimpleRotation.py +++ b/python/gtsam/examples/SimpleRotation.py @@ -10,10 +10,9 @@ This example will perform a relatively trivial optimization on a single variable with a single factor. """ -import gtsam import numpy as np -from gtsam import symbol_shorthand_X as X - +import gtsam +from gtsam.symbol_shorthand import X def main(): """ @@ -33,7 +32,7 @@ def main(): """ prior = gtsam.Rot2.fromAngle(np.deg2rad(30)) prior.print_('goal angle') - model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) + model = gtsam.noiseModel.Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) key = X(1) factor = gtsam.PriorFactorRot2(key, prior, model) diff --git a/cython/gtsam/examples/VisualISAM2Example.py b/python/gtsam/examples/VisualISAM2Example.py similarity index 94% rename from cython/gtsam/examples/VisualISAM2Example.py rename to python/gtsam/examples/VisualISAM2Example.py index 49e6ca95c..bacf510ec 100644 --- a/cython/gtsam/examples/VisualISAM2Example.py +++ b/python/gtsam/examples/VisualISAM2Example.py @@ -17,8 +17,7 @@ import gtsam import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import L, X from gtsam.examples import SFMdata from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 @@ -64,7 +63,7 @@ def visual_ISAM2_example(): K = 
gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma( + measurement_noise = gtsam.noiseModel.Isotropic.Sigma( 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks @@ -110,12 +109,12 @@ def visual_ISAM2_example(): # at least twice before adding it to iSAM. if i == 0: # Add a prior on pose x0 - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array( + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array( [0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise)) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) graph.push_back(gtsam.PriorFactorPoint3( L(0), points[0], point_noise)) # add directly to graph @@ -123,7 +122,7 @@ def visual_ISAM2_example(): # Intentionally initialize the variables off from the ground truth for j, point in enumerate(points): initial_estimate.insert(L(j), gtsam.Point3( - point.x()-0.25, point.y()+0.20, point.z()+0.15)) + point[0]-0.25, point[1]+0.20, point[2]+0.15)) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/cython/gtsam/examples/VisualISAMExample.py b/python/gtsam/examples/VisualISAMExample.py similarity index 77% rename from cython/gtsam/examples/VisualISAMExample.py rename to python/gtsam/examples/VisualISAMExample.py index 5cc37867b..f99d3f3e6 100644 --- a/cython/gtsam/examples/VisualISAMExample.py +++ b/python/gtsam/examples/VisualISAMExample.py @@ -15,13 +15,11 @@ from __future__ import print_function import numpy as np import gtsam from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, - NonlinearFactorGraph, NonlinearISAM, Point3, Pose3, - PriorFactorPoint3, PriorFactorPose3, Rot3, - PinholeCameraCal3_S2, Values) -from gtsam import symbol_shorthand_L 
as L -from gtsam import symbol_shorthand_X as X - +from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, + NonlinearFactorGraph, NonlinearISAM, Pose3, + PriorFactorPoint3, PriorFactorPose3, Rot3, + PinholeCameraCal3_S2, Values, Point3) +from gtsam.symbol_shorthand import X, L def main(): """ @@ -34,7 +32,8 @@ def main(): K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - camera_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + camera_noise = gtsam.noiseModel.Isotropic.Sigma( + 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -55,11 +54,13 @@ def main(): # Add factors for each landmark observation for j, point in enumerate(points): measurement = camera.project(point) - factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, X(i), L(j), K) + factor = GenericProjectionFactorCal3_S2( + measurement, camera_noise, X(i), L(j), K) graph.push_back(factor) # Intentionally initialize the variables off from the ground truth - noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), t=Point3(0.05, -0.10, 0.20)) + noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), + t=Point3(0.05, -0.10, 0.20)) initial_xi = pose.compose(noise) # Add an initial guess for the current pose @@ -71,12 +72,13 @@ def main(): # adding it to iSAM. 
if i == 0: # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas( + np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) @@ -84,8 +86,8 @@ def main(): noise = np.array([-0.25, 0.20, 0.15]) for j, point in enumerate(points): # Intentionally initialize the variables off from the ground truth - initial_lj = points[j].vector() + noise - initial_estimate.insert(L(j), Point3(initial_lj)) + initial_lj = points[j] + noise + initial_estimate.insert(L(j), initial_lj) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/cython/gtsam/examples/__init__.py b/python/gtsam/examples/__init__.py similarity index 100% rename from cython/gtsam/examples/__init__.py rename to python/gtsam/examples/__init__.py diff --git a/python/gtsam/gtsam.tpl b/python/gtsam/gtsam.tpl new file mode 100644 index 000000000..634a81e90 --- /dev/null +++ b/python/gtsam/gtsam.tpl @@ -0,0 +1,48 @@ +/** + * @file gtsam.cpp + * @brief The auto-generated wrapper C++ source code. + * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar + * @date Aug. 18, 2020 + * + * ** THIS FILE IS AUTO-GENERATED, DO NOT MODIFY! ** + */ + +// Include relevant boost libraries required by GTSAM +{include_boost} + +#include +#include +#include +#include "gtsam/config.h" +#include "gtsam/base/serialization.h" +#include "gtsam/nonlinear/utilities.h" // for RedirectCout. 
+ +// These are the included headers listed in `gtsam.i` +{includes} +#include + +// Export classes for serialization +{boost_class_export} + +// Holder type for pybind11 +{hoder_type} + +// Preamble for STL classes +// TODO(fan): make this automatic +#include "python/gtsam/preamble.h" + +using namespace std; + +namespace py = pybind11; + +PYBIND11_MODULE({module_name}, m_) {{ + m_.doc() = "pybind11 wrapper of {module_name}"; + +{wrapped_namespace} + +// Specializations for STL classes +// TODO(fan): make this automatic +#include "python/gtsam/specializations.h" + +}} + diff --git a/python/gtsam/imuBias.py b/python/gtsam/imuBias.py new file mode 100644 index 000000000..399cefb98 --- /dev/null +++ b/python/gtsam/imuBias.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.imuBias import X` +# with this trick, we can do `from gtsam.imuBias import X` +from .gtsam.imuBias import * diff --git a/python/gtsam/noiseModel.py b/python/gtsam/noiseModel.py new file mode 100644 index 000000000..6e1b43488 --- /dev/null +++ b/python/gtsam/noiseModel.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.noiseModel import X` +# with this trick, we can do `from gtsam.noiseModel import X` +from .gtsam.noiseModel import * \ No newline at end of file diff --git a/python/gtsam/preamble.h b/python/gtsam/preamble.h new file mode 100644 index 000000000..9f3032dd0 --- /dev/null +++ b/python/gtsam/preamble.h @@ -0,0 +1,11 @@ +// Please refer to: https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html +// These are required to save one copy operation on Python calls +#ifdef GTSAM_ALLOCATOR_TBB +PYBIND11_MAKE_OPAQUE(std::vector>); +#else +PYBIND11_MAKE_OPAQUE(std::vector); +#endif +PYBIND11_MAKE_OPAQUE(std::vector >); +PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(std::vector > >); +PYBIND11_MAKE_OPAQUE(std::vector > >); diff --git 
a/python/gtsam/specializations.h b/python/gtsam/specializations.h new file mode 100644 index 000000000..3b60e42cb --- /dev/null +++ b/python/gtsam/specializations.h @@ -0,0 +1,11 @@ +// Please refer to: https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html +// These are required to save one copy operation on Python calls +#ifdef GTSAM_ALLOCATOR_TBB +py::bind_vector > >(m_, "KeyVector"); +#else +py::bind_vector >(m_, "KeyVector"); +#endif +py::bind_vector > >(m_, "Point2Vector"); +py::bind_vector >(m_, "Pose3Vector"); +py::bind_vector > > >(m_, "BetweenFactorPose3s"); +py::bind_vector > > >(m_, "BetweenFactorPose2s"); diff --git a/python/gtsam/symbol_shorthand.py b/python/gtsam/symbol_shorthand.py new file mode 100644 index 000000000..748d36558 --- /dev/null +++ b/python/gtsam/symbol_shorthand.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.symbol_shorthand import X` +# with this trick, we can do `from gtsam.symbol_shorthand import X` +from .gtsam.symbol_shorthand import * \ No newline at end of file diff --git a/cython/gtsam/tests/testScenarioRunner.py b/python/gtsam/tests/testScenarioRunner.py similarity index 96% rename from cython/gtsam/tests/testScenarioRunner.py rename to python/gtsam/tests/testScenarioRunner.py index 97a97b0ec..2af16a794 100644 --- a/cython/gtsam/tests/testScenarioRunner.py +++ b/python/gtsam/tests/testScenarioRunner.py @@ -32,7 +32,7 @@ class TestScenarioRunner(GtsamTestCase): dt = 0.1 params = gtsam.PreintegrationParams.MakeSharedU(self.g) - bias = gtsam.imuBias_ConstantBias() + bias = gtsam.imuBias.ConstantBias() runner = gtsam.ScenarioRunner( scenario, params, dt, bias) diff --git a/cython/gtsam/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py similarity index 100% rename from cython/gtsam/tests/test_Cal3Unified.py rename to python/gtsam/tests/test_Cal3Unified.py diff --git a/cython/gtsam/tests/test_FrobeniusFactor.py 
b/python/gtsam/tests/test_FrobeniusFactor.py similarity index 92% rename from cython/gtsam/tests/test_FrobeniusFactor.py rename to python/gtsam/tests/test_FrobeniusFactor.py index f3f5354bb..e808627f5 100644 --- a/cython/gtsam/tests/test_FrobeniusFactor.py +++ b/python/gtsam/tests/test_FrobeniusFactor.py @@ -13,7 +13,7 @@ import unittest import numpy as np from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4, - FrobeniusWormholeFactor, SOn) + ShonanFactor3, SOn) id = SO4() v1 = np.array([0, 0, 0, 0.1, 0, 0]) @@ -43,7 +43,7 @@ class TestFrobeniusFactorSO4(unittest.TestCase): """Test creation of a factor that calculates Shonan error.""" R1 = SO3.Expmap(v1[3:]) R2 = SO3.Expmap(v2[3:]) - factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4) + factor = ShonanFactor3(1, 2, Rot3(R1.between(R2).matrix()), p=4) I4 = SOn(4) Q1 = I4.retract(v1) Q2 = I4.retract(v2) diff --git a/cython/gtsam/tests/test_GaussianFactorGraph.py b/python/gtsam/tests/test_GaussianFactorGraph.py similarity index 94% rename from cython/gtsam/tests/test_GaussianFactorGraph.py rename to python/gtsam/tests/test_GaussianFactorGraph.py index 983825d8b..a29b0f263 100644 --- a/cython/gtsam/tests/test_GaussianFactorGraph.py +++ b/python/gtsam/tests/test_GaussianFactorGraph.py @@ -16,7 +16,7 @@ import unittest import gtsam import numpy as np -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import X from gtsam.utils.test_case import GtsamTestCase @@ -28,8 +28,8 @@ def create_graph(): x1 = X(1) x2 = X(2) - BETWEEN_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) - PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) + BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) + PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) graph.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE) graph.add(x2, np.eye(1), x1, -np.eye(1), 2*np.ones(1), BETWEEN_NOISE) diff --git a/cython/gtsam/tests/test_JacobianFactor.py 
b/python/gtsam/tests/test_JacobianFactor.py similarity index 93% rename from cython/gtsam/tests/test_JacobianFactor.py rename to python/gtsam/tests/test_JacobianFactor.py index 04433492b..6e049ed47 100644 --- a/cython/gtsam/tests/test_JacobianFactor.py +++ b/python/gtsam/tests/test_JacobianFactor.py @@ -48,7 +48,7 @@ class TestJacobianFactor(GtsamTestCase): # the RHS b2 = np.array([-1., 1.5, 2., -1.]) sigmas = np.array([1., 1., 1., 1.]) - model4 = gtsam.noiseModel_Diagonal.Sigmas(sigmas) + model4 = gtsam.noiseModel.Diagonal.Sigmas(sigmas) combined = gtsam.JacobianFactor(x2, Ax2, l1, Al1, x1, Ax1, b2, model4) # eliminate the first variable (x2) in the combined factor, destructive @@ -66,7 +66,7 @@ class TestJacobianFactor(GtsamTestCase): [+0.00, -8.94427]]) d = np.array([2.23607, -1.56525]) expectedCG = gtsam.GaussianConditional( - x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel_Unit.Create(2)) + x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel.Unit.Create(2)) # check if the result matches self.gtsamAssertEquals(actualCG, expectedCG, 1e-4) @@ -82,7 +82,7 @@ class TestJacobianFactor(GtsamTestCase): # the RHS b1 = np.array([0.0, 0.894427]) - model2 = gtsam.noiseModel_Diagonal.Sigmas(np.array([1., 1.])) + model2 = gtsam.noiseModel.Diagonal.Sigmas(np.array([1., 1.])) expectedLF = gtsam.JacobianFactor(l1, Bl1, x1, Bx1, b1, model2) # check if the result matches the combined (reduced) factor diff --git a/cython/gtsam/tests/test_KalmanFilter.py b/python/gtsam/tests/test_KalmanFilter.py similarity index 95% rename from cython/gtsam/tests/test_KalmanFilter.py rename to python/gtsam/tests/test_KalmanFilter.py index 94c41df72..48a91b96c 100644 --- a/cython/gtsam/tests/test_KalmanFilter.py +++ b/python/gtsam/tests/test_KalmanFilter.py @@ -22,13 +22,13 @@ class TestKalmanFilter(GtsamTestCase): F = np.eye(2) B = np.eye(2) u = np.array([1.0, 0.0]) - modelQ = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelQ = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) Q = 
0.01 * np.eye(2) H = np.eye(2) z1 = np.array([1.0, 0.0]) z2 = np.array([2.0, 0.0]) z3 = np.array([3.0, 0.0]) - modelR = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelR = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) R = 0.01 * np.eye(2) # Create the set of expected output TestValues diff --git a/cython/gtsam/tests/test_KarcherMeanFactor.py b/python/gtsam/tests/test_KarcherMeanFactor.py similarity index 96% rename from cython/gtsam/tests/test_KarcherMeanFactor.py rename to python/gtsam/tests/test_KarcherMeanFactor.py index 6976decc1..a315a506c 100644 --- a/cython/gtsam/tests/test_KarcherMeanFactor.py +++ b/python/gtsam/tests/test_KarcherMeanFactor.py @@ -18,7 +18,7 @@ import numpy as np from gtsam.utils.test_case import GtsamTestCase KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) def find_Karcher_mean_Rot3(rotations): @@ -59,8 +59,8 @@ class TestKarcherMean(GtsamTestCase): R12 = R.compose(R.compose(R)) graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) keys = gtsam.KeyVector() - keys.push_back(1) - keys.push_back(2) + keys.append(1) + keys.append(2) graph.add(gtsam.KarcherMeanFactorRot3(keys)) initial = gtsam.Values() diff --git a/cython/gtsam/tests/test_LocalizationExample.py b/python/gtsam/tests/test_LocalizationExample.py similarity index 94% rename from cython/gtsam/tests/test_LocalizationExample.py rename to python/gtsam/tests/test_LocalizationExample.py index 6ce65f087..8ae3583f0 100644 --- a/cython/gtsam/tests/test_LocalizationExample.py +++ b/python/gtsam/tests/test_LocalizationExample.py @@ -26,7 +26,7 @@ class TestLocalizationExample(GtsamTestCase): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(0, 1, 
odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) @@ -37,7 +37,7 @@ class TestLocalizationExample(GtsamTestCase): groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0)) groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0)) groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0)) - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) for i in range(3): graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model)) diff --git a/cython/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py similarity index 88% rename from cython/gtsam/tests/test_NonlinearOptimizer.py rename to python/gtsam/tests/test_NonlinearOptimizer.py index 985dc30a2..e9234a43b 100644 --- a/cython/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -15,11 +15,11 @@ from __future__ import print_function import unittest import gtsam -from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer, +from gtsam import (DoglegOptimizer, DoglegParams, + DummyPreconditionerParameters, GaussNewtonOptimizer, GaussNewtonParams, LevenbergMarquardtOptimizer, - LevenbergMarquardtParams, PCGSolverParameters, - DummyPreconditionerParameters, NonlinearFactorGraph, Ordering, - Point2, PriorFactorPoint2, Values) + LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, + PCGSolverParameters, Point2, PriorFactorPoint2, Values) from gtsam.utils.test_case import GtsamTestCase KEY1 = 1 @@ -30,7 +30,7 @@ class TestScenario(GtsamTestCase): def test_optimize(self): """Do trivial test with three optimizer variants.""" fg = NonlinearFactorGraph() - model = gtsam.noiseModel_Unit.Create(2) + model = gtsam.noiseModel.Unit.Create(2) fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) # test error at minimum diff --git a/cython/gtsam/tests/test_OdometryExample.py b/python/gtsam/tests/test_OdometryExample.py similarity index 94% 
rename from cython/gtsam/tests/test_OdometryExample.py rename to python/gtsam/tests/test_OdometryExample.py index c8ea95588..72e532f20 100644 --- a/cython/gtsam/tests/test_OdometryExample.py +++ b/python/gtsam/tests/test_OdometryExample.py @@ -25,7 +25,7 @@ class TestOdometryExample(GtsamTestCase): # Add a Gaussian prior on pose x_1 priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior mean is at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas( + priorNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) # 30cm std on x,y, 0.1 rad on theta # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) @@ -33,7 +33,7 @@ class TestOdometryExample(GtsamTestCase): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise)) diff --git a/cython/gtsam/tests/test_PlanarSLAMExample.py b/python/gtsam/tests/test_PlanarSLAMExample.py similarity index 93% rename from cython/gtsam/tests/test_PlanarSLAMExample.py rename to python/gtsam/tests/test_PlanarSLAMExample.py index ae813d35c..8cb3ad2ac 100644 --- a/cython/gtsam/tests/test_PlanarSLAMExample.py +++ b/python/gtsam/tests/test_PlanarSLAMExample.py @@ -32,13 +32,13 @@ class TestPlanarSLAM(GtsamTestCase): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # general noisemodel for odometry - odometryNoise = 
gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ class TestPlanarSLAM(GtsamTestCase): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/python/gtsam/tests/test_Point2.py b/python/gtsam/tests/test_Point2.py new file mode 100644 index 000000000..52ac92970 --- /dev/null +++ b/python/gtsam/tests/test_Point2.py @@ -0,0 +1,29 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Point2 unit tests. +Author: Frank Dellaert & Fan Jiang +""" +import unittest + +import gtsam +import numpy as np +from gtsam.utils.test_case import GtsamTestCase + + +class TestPoint2(GtsamTestCase): + """Test selected Point2 methods.""" + + def test_constructors(self): + """Test constructors from doubles and vectors.""" + expected = gtsam.Point2(1, 2) + actual = gtsam.Point2(np.array([1, 2])) + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam/tests/test_Point3.py b/python/gtsam/tests/test_Point3.py new file mode 100644 index 000000000..f7a1c0f06 --- /dev/null +++ b/python/gtsam/tests/test_Point3.py @@ -0,0 +1,29 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Point3 unit tests. 
+Author: Frank Dellaert & Fan Jiang +""" +import unittest + +import gtsam +import numpy as np +from gtsam.utils.test_case import GtsamTestCase + + +class TestPoint3(GtsamTestCase): + """Test selected Point3 methods.""" + + def test_constructors(self): + """Test constructors from doubles and vectors.""" + expected = gtsam.Point3(1, 2, 3) + actual = gtsam.Point3(np.array([1, 2, 3])) + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/cython/gtsam/tests/test_Pose2.py b/python/gtsam/tests/test_Pose2.py similarity index 100% rename from cython/gtsam/tests/test_Pose2.py rename to python/gtsam/tests/test_Pose2.py diff --git a/cython/gtsam/tests/test_Pose2SLAMExample.py b/python/gtsam/tests/test_Pose2SLAMExample.py similarity index 93% rename from cython/gtsam/tests/test_Pose2SLAMExample.py rename to python/gtsam/tests/test_Pose2SLAMExample.py index a79b6b18c..e47b9fbff 100644 --- a/cython/gtsam/tests/test_Pose2SLAMExample.py +++ b/python/gtsam/tests/test_Pose2SLAMExample.py @@ -32,13 +32,13 @@ class TestPose2SLAMExample(GtsamTestCase): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # general noisemodel for odometry - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ class TestPose2SLAMExample(GtsamTestCase): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = 
gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/cython/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py similarity index 100% rename from cython/gtsam/tests/test_Pose3.py rename to python/gtsam/tests/test_Pose3.py diff --git a/cython/gtsam/tests/test_Pose3SLAMExample.py b/python/gtsam/tests/test_Pose3SLAMExample.py similarity index 97% rename from cython/gtsam/tests/test_Pose3SLAMExample.py rename to python/gtsam/tests/test_Pose3SLAMExample.py index 1e9eaac67..fce171b55 100644 --- a/cython/gtsam/tests/test_Pose3SLAMExample.py +++ b/python/gtsam/tests/test_Pose3SLAMExample.py @@ -30,7 +30,7 @@ class TestPose3SLAMExample(GtsamTestCase): fg = gtsam.NonlinearFactorGraph() fg.add(gtsam.NonlinearEqualityPose3(0, p0)) delta = p0.between(p1) - covariance = gtsam.noiseModel_Diagonal.Sigmas( + covariance = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 5. * pi / 180, 5. * pi / 180, 5. 
* pi / 180])) fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance)) fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance)) diff --git a/cython/gtsam/tests/test_PriorFactor.py b/python/gtsam/tests/test_PriorFactor.py similarity index 88% rename from cython/gtsam/tests/test_PriorFactor.py rename to python/gtsam/tests/test_PriorFactor.py index 66207b800..0582cf5d7 100644 --- a/cython/gtsam/tests/test_PriorFactor.py +++ b/python/gtsam/tests/test_PriorFactor.py @@ -23,14 +23,14 @@ class TestPriorFactor(GtsamTestCase): key = 5 priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) + model = gtsam.noiseModel.Unit.Create(6) factor = gtsam.PriorFactorPose3(key, priorPose3, model) values.insert(key, priorPose3) self.assertEqual(factor.error(values), 0) key = 3 priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) + model = gtsam.noiseModel.Unit.Create(3) factor = gtsam.PriorFactorVector(key, priorVector, model) values.insert(key, priorVector) self.assertEqual(factor.error(values), 0) @@ -45,14 +45,14 @@ class TestPriorFactor(GtsamTestCase): # define and add Pose3 prior key = 5 priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) + model = gtsam.noiseModel.Unit.Create(6) graph.addPriorPose3(key, priorPose3, model) self.assertEqual(graph.size(), 1) # define and add Vector prior key = 3 priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) + model = gtsam.noiseModel.Unit.Create(3) graph.addPriorVector(key, priorVector, model) self.assertEqual(graph.size(), 2) diff --git a/cython/gtsam/tests/test_SFMExample.py b/python/gtsam/tests/test_SFMExample.py similarity index 72% rename from cython/gtsam/tests/test_SFMExample.py rename to python/gtsam/tests/test_SFMExample.py index e8fa46186..47a3cbe3e 100644 --- a/cython/gtsam/tests/test_SFMExample.py +++ b/python/gtsam/tests/test_SFMExample.py @@ -15,8 +15,9 @@ import numpy as np import gtsam import gtsam.utils.visual_data_generator as generator from 
gtsam import symbol +from gtsam.noiseModel import Isotropic, Diagonal from gtsam.utils.test_case import GtsamTestCase - +from gtsam.symbol_shorthand import X, P class TestSFMExample(GtsamTestCase): @@ -34,29 +35,29 @@ class TestSFMExample(GtsamTestCase): graph = gtsam.NonlinearFactorGraph() # Add factors for all measurements - measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, measurementNoiseSigma) + measurementNoise = Isotropic.Sigma(2, measurementNoiseSigma) for i in range(len(data.Z)): for k in range(len(data.Z[i])): j = data.J[i][k] graph.add(gtsam.GenericProjectionFactorCal3_S2( data.Z[i][k], measurementNoise, - symbol(ord('x'), i), symbol(ord('p'), j), data.K)) + X(i), P(j), data.K)) - posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas) - graph.add(gtsam.PriorFactorPose3(symbol(ord('x'), 0), + posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas) + graph.add(gtsam.PriorFactorPose3(X(0), truth.cameras[0].pose(), posePriorNoise)) - pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, pointNoiseSigma) - graph.add(gtsam.PriorFactorPoint3(symbol(ord('p'), 0), + pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma) + graph.add(gtsam.PriorFactorPoint3(P(0), truth.points[0], pointPriorNoise)) # Initial estimate initialEstimate = gtsam.Values() for i in range(len(truth.cameras)): pose_i = truth.cameras[i].pose() - initialEstimate.insert(symbol(ord('x'), i), pose_i) + initialEstimate.insert(X(i), pose_i) for j in range(len(truth.points)): point_j = truth.points[j] - initialEstimate.insert(symbol(ord('p'), j), point_j) + initialEstimate.insert(P(j), point_j) # Optimization optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) @@ -66,16 +67,16 @@ class TestSFMExample(GtsamTestCase): # Marginalization marginals = gtsam.Marginals(graph, result) - marginals.marginalCovariance(symbol(ord('p'), 0)) - marginals.marginalCovariance(symbol(ord('x'), 0)) + marginals.marginalCovariance(P(0)) + marginals.marginalCovariance(X(0)) # Check optimized 
results, should be equal to ground truth for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(X(i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('p'), j)) + point_j = result.atPoint3(P(j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/cython/gtsam/tests/test_SO4.py b/python/gtsam/tests/test_SO4.py similarity index 100% rename from cython/gtsam/tests/test_SO4.py rename to python/gtsam/tests/test_SO4.py diff --git a/cython/gtsam/tests/test_SOn.py b/python/gtsam/tests/test_SOn.py similarity index 100% rename from cython/gtsam/tests/test_SOn.py rename to python/gtsam/tests/test_SOn.py diff --git a/cython/gtsam/tests/test_Scenario.py b/python/gtsam/tests/test_Scenario.py similarity index 90% rename from cython/gtsam/tests/test_Scenario.py rename to python/gtsam/tests/test_Scenario.py index 09601fba5..fc5965829 100644 --- a/cython/gtsam/tests/test_Scenario.py +++ b/python/gtsam/tests/test_Scenario.py @@ -43,8 +43,11 @@ class TestScenario(GtsamTestCase): # R = v/w, so test if loop crests at 2*R R = v / w T30 = scenario.pose(T) + xyz = T30.rotation().xyz() + if xyz[0] < 0: + xyz = -xyz np.testing.assert_almost_equal( - np.array([math.pi, 0, math.pi]), T30.rotation().xyz()) + np.array([math.pi, 0, math.pi]), xyz) self.gtsamAssertEquals(gtsam.Point3( 0, 0, 2.0 * R), T30.translation(), 1e-9) diff --git a/python/gtsam/tests/test_ShonanAveraging.py b/python/gtsam/tests/test_ShonanAveraging.py new file mode 100644 index 000000000..4c423574d --- /dev/null +++ b/python/gtsam/tests/test_ShonanAveraging.py @@ -0,0 +1,139 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for Shonan Rotation Averaging. 
+Author: Frank Dellaert +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +import unittest + +import gtsam +from gtsam import ShonanAveraging3, ShonanAveragingParameters3 +from gtsam.utils.test_case import GtsamTestCase + +DEFAULT_PARAMS = ShonanAveragingParameters3( + gtsam.LevenbergMarquardtParams.CeresDefaults()) + + +def fromExampleName(name: str, parameters=DEFAULT_PARAMS): + g2oFile = gtsam.findExampleDataFile(name) + return ShonanAveraging3(g2oFile, parameters) + + +class TestShonanAveraging(GtsamTestCase): + """Tests for Shonan Rotation Averaging.""" + + def setUp(self): + """Set up common variables.""" + self.shonan = fromExampleName("toyExample.g2o") + + def test_checkConstructor(self): + self.assertEqual(5, self.shonan.nrUnknowns()) + + D = self.shonan.denseD() + self.assertEqual((15, 15), D.shape) + + Q = self.shonan.denseQ() + self.assertEqual((15, 15), Q.shape) + + L = self.shonan.denseL() + self.assertEqual((15, 15), L.shape) + + def test_buildGraphAt(self): + graph = self.shonan.buildGraphAt(5) + self.assertEqual(7, graph.size()) + + def test_checkOptimality(self): + random = self.shonan.initializeRandomlyAt(4) + lambdaMin = self.shonan.computeMinEigenValue(random) + self.assertAlmostEqual(-414.87376657555996, + lambdaMin, places=3) # Regression test + self.assertFalse(self.shonan.checkOptimality(random)) + + def test_tryOptimizingAt3(self): + initial = self.shonan.initializeRandomlyAt(3) + self.assertFalse(self.shonan.checkOptimality(initial)) + result = self.shonan.tryOptimizingAt(3, initial) + self.assertTrue(self.shonan.checkOptimality(result)) + lambdaMin = self.shonan.computeMinEigenValue(result) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + self.assertAlmostEqual(0, self.shonan.costAt(3, result), places=3) + SO3Values = self.shonan.roundSolution(result) + self.assertAlmostEqual(0, self.shonan.cost(SO3Values), places=3) + + def test_tryOptimizingAt4(self): + random = 
self.shonan.initializeRandomlyAt(4) + result = self.shonan.tryOptimizingAt(4, random) + self.assertTrue(self.shonan.checkOptimality(result)) + self.assertAlmostEqual(0, self.shonan.costAt(4, result), places=2) + lambdaMin = self.shonan.computeMinEigenValue(result) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + SO3Values = self.shonan.roundSolution(result) + self.assertAlmostEqual(0, self.shonan.cost(SO3Values), places=3) + + def test_initializeWithDescent(self): + random = self.shonan.initializeRandomlyAt(3) + Qstar3 = self.shonan.tryOptimizingAt(3, random) + lambdaMin, minEigenVector = self.shonan.computeMinEigenVector(Qstar3) + initialQ4 = self.shonan.initializeWithDescent( + 4, Qstar3, minEigenVector, lambdaMin) + self.assertAlmostEqual(5, initialQ4.size()) + + def test_run(self): + initial = self.shonan.initializeRandomly() + result, lambdaMin = self.shonan.run(initial, 5, 10) + self.assertAlmostEqual(0, self.shonan.cost(result), places=2) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + + def test_runKlausKarcher(self): + # Load 2D toy example + lmParams = gtsam.LevenbergMarquardtParams.CeresDefaults() + # lmParams.setVerbosityLM("SUMMARY") + g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") + parameters = gtsam.ShonanAveragingParameters2(lmParams) + shonan = gtsam.ShonanAveraging2(g2oFile, parameters) + self.assertAlmostEqual(4, shonan.nrUnknowns()) + + # Check graph building + graph = shonan.buildGraphAt(2) + self.assertAlmostEqual(6, graph.size()) + initial = shonan.initializeRandomly() + result, lambdaMin = shonan.run(initial, 2, 10) + self.assertAlmostEqual(0.0008211, shonan.cost(result), places=5) + self.assertAlmostEqual(0, lambdaMin, places=9) # certificate! + + # Test alpha/beta/gamma prior weighting. 
+ def test_PriorWeights(self): + lmParams = gtsam.LevenbergMarquardtParams.CeresDefaults() + params = ShonanAveragingParameters3(lmParams) + self.assertAlmostEqual(0, params.getAnchorWeight(), 1e-9) + self.assertAlmostEqual(1, params.getKarcherWeight(), 1e-9) + self.assertAlmostEqual(0, params.getGaugesWeight(), 1e-9) + alpha, beta, gamma = 100.0, 200.0, 300.0 + params.setAnchorWeight(alpha) + params.setKarcherWeight(beta) + params.setGaugesWeight(gamma) + self.assertAlmostEqual(alpha, params.getAnchorWeight(), 1e-9) + self.assertAlmostEqual(beta, params.getKarcherWeight(), 1e-9) + self.assertAlmostEqual(gamma, params.getGaugesWeight(), 1e-9) + params.setKarcherWeight(0) + shonan = fromExampleName("Klaus3.g2o", params) + + initial = gtsam.Values() + for i in range(3): + initial.insert(i, gtsam.Rot3()) + self.assertAlmostEqual(3.0756, shonan.cost(initial), places=3) + result, _lambdaMin = shonan.run(initial, 3, 3) + self.assertAlmostEqual(0.0015, shonan.cost(result), places=3) + + +if __name__ == '__main__': + unittest.main() diff --git a/cython/gtsam/tests/test_SimpleCamera.py b/python/gtsam/tests/test_SimpleCamera.py similarity index 82% rename from cython/gtsam/tests/test_SimpleCamera.py rename to python/gtsam/tests/test_SimpleCamera.py index a3654a5f1..efdfec561 100644 --- a/cython/gtsam/tests/test_SimpleCamera.py +++ b/python/gtsam/tests/test_SimpleCamera.py @@ -5,7 +5,7 @@ All Rights Reserved See LICENSE for the license information -PinholeCameraCal3_S2 unit tests. +SimpleCamera unit tests. 
Author: Frank Dellaert & Duy Nguyen Ta (Python) """ import math @@ -14,7 +14,7 @@ import unittest import numpy as np import gtsam -from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, PinholeCameraCal3_S2 +from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, SimpleCamera from gtsam.utils.test_case import GtsamTestCase K = Cal3_S2(625, 625, 0, 0, 0) @@ -23,14 +23,14 @@ class TestSimpleCamera(GtsamTestCase): def test_constructor(self): pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5)) - camera = PinholeCameraCal3_S2(pose1, K) + camera = SimpleCamera(pose1, K) self.gtsamAssertEquals(camera.calibration(), K, 1e-9) self.gtsamAssertEquals(camera.pose(), pose1, 1e-9) def test_level2(self): # Create a level camera, looking in Y-direction pose2 = Pose2(0.4,0.3,math.pi/2.0) - camera = PinholeCameraCal3_S2.Level(K, pose2, 0.1) + camera = SimpleCamera.Level(K, pose2, 0.1) # expected x = Point3(1,0,0) diff --git a/cython/gtsam/tests/test_StereoVOExample.py b/python/gtsam/tests/test_StereoVOExample.py similarity index 92% rename from cython/gtsam/tests/test_StereoVOExample.py rename to python/gtsam/tests/test_StereoVOExample.py index 3f5f57522..cefc08aab 100644 --- a/cython/gtsam/tests/test_StereoVOExample.py +++ b/python/gtsam/tests/test_StereoVOExample.py @@ -28,11 +28,11 @@ class TestStereoVOExample(GtsamTestCase): # - No noise on measurements ## Create keys for variables - x1 = symbol(ord('x'),1) - x2 = symbol(ord('x'),2) - l1 = symbol(ord('l'),1) - l2 = symbol(ord('l'),2) - l3 = symbol(ord('l'),3) + x1 = symbol('x',1) + x2 = symbol('x',2) + l1 = symbol('l',1) + l2 = symbol('l',2) + l3 = symbol('l',3) ## Create graph container and add factors to it graph = gtsam.NonlinearFactorGraph() @@ -44,7 +44,7 @@ class TestStereoVOExample(GtsamTestCase): ## Create realistic calibration and measurement noise model # format: fx fy skew cx cy baseline K = gtsam.Cal3_S2Stereo(1000, 1000, 0, 320, 240, 0.2) - stereo_model = gtsam.noiseModel_Diagonal.Sigmas(np.array([1.0, 1.0, 
1.0])) + stereo_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) ## Add measurements # pose 1 diff --git a/python/gtsam/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py new file mode 100644 index 000000000..b43ad9b57 --- /dev/null +++ b/python/gtsam/tests/test_Triangulation.py @@ -0,0 +1,80 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Test Triangulation +Author: Frank Dellaert & Fan Jiang (Python) +""" +import unittest + +import numpy as np + +import gtsam as g +from gtsam.utils.test_case import GtsamTestCase +from gtsam import Cal3_S2, Cal3Bundler, Rot3, Pose3, \ + PinholeCameraCal3_S2, Point3, Point2Vector, Pose3Vector, triangulatePoint3 + +class TestVisualISAMExample(GtsamTestCase): + def test_TriangulationExample(self): + # Some common constants + sharedCal = Cal3_S2(1500, 1200, 0, 640, 480) + + # Looking along X-axis, 1 meter above ground plane (x-y) + upright = Rot3.Ypr(-np.pi / 2, 0., -np.pi / 2) + pose1 = Pose3(upright, Point3(0, 0, 1)) + camera1 = PinholeCameraCal3_S2(pose1, sharedCal) + + # create second camera 1 meter to the right of first camera + pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) + camera2 = PinholeCameraCal3_S2(pose2, sharedCal) + + # landmark ~5 meters infront of camera + landmark = Point3(5, 0.5, 1.2) + + # 1. Project two landmarks into two cameras and triangulate + z1 = camera1.project(landmark) + z2 = camera2.project(landmark) + + # twoPoses + poses = Pose3Vector() + measurements = Point2Vector() + + poses.append(pose1) + poses.append(pose2) + measurements.append(z1) + measurements.append(z2) + + optimize = True + rank_tol = 1e-9 + + triangulated_landmark = triangulatePoint3(poses,sharedCal, measurements, rank_tol, optimize) + self.gtsamAssertEquals(landmark, triangulated_landmark,1e-9) + + # 2. 
Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + measurements = Point2Vector() + measurements.append(z1 - np.array([0.1, 0.5])) + measurements.append(z2 - np.array([-0.2, 0.3])) + + triangulated_landmark = triangulatePoint3(poses,sharedCal, measurements, rank_tol, optimize) + self.gtsamAssertEquals(landmark, triangulated_landmark,1e-2) + # + # # two Poses with Bundler Calibration + # bundlerCal = Cal3Bundler(1500, 0, 0, 640, 480) + # camera1 = PinholeCameraCal3Bundler(pose1, bundlerCal) + # camera2 = PinholeCameraCal3Bundler(pose2, bundlerCal) + # + # z1 = camera1.project(landmark) + # z2 = camera2.project(landmark) + # + # measurements = Point2Vector() + # measurements.append(z1) + # measurements.append(z2) + # + # triangulated_landmark = triangulatePoint3(poses,bundlerCal, measurements, rank_tol, optimize) + # self.gtsamAssertEquals(landmark, triangulated_landmark,1e-9) + +if __name__ == "__main__": + unittest.main() diff --git a/cython/gtsam/tests/test_Values.py b/python/gtsam/tests/test_Values.py similarity index 94% rename from cython/gtsam/tests/test_Values.py rename to python/gtsam/tests/test_Values.py index 20634a21c..dddd11c40 100644 --- a/cython/gtsam/tests/test_Values.py +++ b/python/gtsam/tests/test_Values.py @@ -15,8 +15,7 @@ import numpy as np import gtsam from gtsam import (Cal3_S2, Cal3Bundler, Cal3DS2, EssentialMatrix, Point2, - Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, - imuBias_ConstantBias) + Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, imuBias) from gtsam.utils.test_case import GtsamTestCase @@ -37,7 +36,7 @@ class TestValues(GtsamTestCase): values.insert(7, Cal3DS2()) values.insert(8, Cal3Bundler()) values.insert(9, E) - values.insert(10, imuBias_ConstantBias()) + values.insert(10, imuBias.ConstantBias()) # Special cases for Vectors and Matrices # Note that gtsam's Eigen Vectors and Matrices requires double-precision @@ -70,8 +69,8 @@ class TestValues(GtsamTestCase): 
self.gtsamAssertEquals(values.atCal3DS2(7), Cal3DS2(), tol) self.gtsamAssertEquals(values.atCal3Bundler(8), Cal3Bundler(), tol) self.gtsamAssertEquals(values.atEssentialMatrix(9), E, tol) - self.gtsamAssertEquals(values.atimuBias_ConstantBias( - 10), imuBias_ConstantBias(), tol) + self.gtsamAssertEquals(values.atConstantBias( + 10), imuBias.ConstantBias(), tol) # special cases for Vector and Matrix: actualVector = values.atVector(11) diff --git a/cython/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py similarity index 93% rename from cython/gtsam/tests/test_VisualISAMExample.py rename to python/gtsam/tests/test_VisualISAMExample.py index 99d7e6160..6eb05eeee 100644 --- a/cython/gtsam/tests/test_VisualISAMExample.py +++ b/python/gtsam/tests/test_VisualISAMExample.py @@ -46,11 +46,11 @@ class TestVisualISAMExample(GtsamTestCase): isam, result = visual_isam.step(data, isam, result, truth, currentPose) for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(symbol('x', i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('l'), j)) + point_j = result.atPoint3(symbol('l', j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/cython/gtsam/tests/test_dataset.py b/python/gtsam/tests/test_dataset.py similarity index 87% rename from cython/gtsam/tests/test_dataset.py rename to python/gtsam/tests/test_dataset.py index 60fb9450d..87fc2ad54 100644 --- a/cython/gtsam/tests/test_dataset.py +++ b/python/gtsam/tests/test_dataset.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest import gtsam -from gtsam import BetweenFactorPose3, BetweenFactorPose3s +from gtsam import BetweenFactorPose3 from gtsam.utils.test_case import GtsamTestCase @@ -37,8 +37,8 @@ class TestDataset(GtsamTestCase): def test_parse3Dfactors(self): """Test parsing into data 
structure.""" factors = gtsam.parse3DFactors(self.pose3_example_g2o_file) - self.assertEqual(factors.size(), 6) - self.assertIsInstance(factors.at(0), BetweenFactorPose3) + self.assertEqual(len(factors), 6) + self.assertIsInstance(factors[0], BetweenFactorPose3) if __name__ == '__main__': diff --git a/cython/gtsam/tests/test_dsf_map.py b/python/gtsam/tests/test_dsf_map.py similarity index 100% rename from cython/gtsam/tests/test_dsf_map.py rename to python/gtsam/tests/test_dsf_map.py diff --git a/cython/gtsam/tests/test_initialize_pose3.py b/python/gtsam/tests/test_initialize_pose3.py similarity index 96% rename from cython/gtsam/tests/test_initialize_pose3.py rename to python/gtsam/tests/test_initialize_pose3.py index 3aa7e3470..6d7f66653 100644 --- a/cython/gtsam/tests/test_initialize_pose3.py +++ b/python/gtsam/tests/test_initialize_pose3.py @@ -24,7 +24,7 @@ class TestValues(GtsamTestCase): def setUp(self): - model = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) + model = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) # We consider a small graph: # symbolic FG @@ -64,9 +64,8 @@ class TestValues(GtsamTestCase): def test_orientations(self): pose3Graph = gtsam.InitializePose3.buildPose3graph(self.graph) - initial = gtsam.InitializePose3.computeOrientationsChordal(pose3Graph) - + # comparison is up to M_PI, that's why we add some multiples of 2*M_PI self.gtsamAssertEquals(initial.atRot3(x0), self.R0, 1e-6) self.gtsamAssertEquals(initial.atRot3(x1), self.R1, 1e-6) @@ -77,7 +76,7 @@ class TestValues(GtsamTestCase): g2oFile = gtsam.findExampleDataFile("pose3example-grid") is3D = True inputGraph, expectedValues = gtsam.readG2o(g2oFile, is3D) - priorModel = gtsam.noiseModel_Unit.Create(6) + priorModel = gtsam.noiseModel.Unit.Create(6) inputGraph.add(gtsam.PriorFactorPose3(0, Pose3(), priorModel)) initial = gtsam.InitializePose3.initialize(inputGraph) diff --git a/cython/gtsam/tests/test_logging_optimizer.py b/python/gtsam/tests/test_logging_optimizer.py similarity index 98% rename 
from cython/gtsam/tests/test_logging_optimizer.py rename to python/gtsam/tests/test_logging_optimizer.py index 2560a72a2..47eb32e7b 100644 --- a/cython/gtsam/tests/test_logging_optimizer.py +++ b/python/gtsam/tests/test_logging_optimizer.py @@ -21,7 +21,7 @@ from gtsam.utils.test_case import GtsamTestCase from gtsam.utils.logging_optimizer import gtsam_optimize KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) class TestOptimizeComet(GtsamTestCase): diff --git a/cython/gtsam/tests/__init__.py b/python/gtsam/utils/__init__.py similarity index 100% rename from cython/gtsam/tests/__init__.py rename to python/gtsam/utils/__init__.py diff --git a/cython/gtsam/utils/circlePose3.py b/python/gtsam/utils/circlePose3.py similarity index 79% rename from cython/gtsam/utils/circlePose3.py rename to python/gtsam/utils/circlePose3.py index 7012548f4..e1def9427 100644 --- a/cython/gtsam/utils/circlePose3.py +++ b/python/gtsam/utils/circlePose3.py @@ -1,9 +1,10 @@ import gtsam +import math import numpy as np -from math import pi, cos, sin +from math import pi -def circlePose3(numPoses=8, radius=1.0, symbolChar=0): +def circlePose3(numPoses=8, radius=1.0, symbolChar='\0'): """ circlePose3 generates a set of poses in a circle. 
This function returns those poses inside a gtsam.Values object, with sequential @@ -18,10 +19,6 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar=0): Vehicle at p0 is looking towards y axis (X-axis points towards world y) """ - # Force symbolChar to be a single character - if type(symbolChar) is str: - symbolChar = ord(symbolChar[0]) - values = gtsam.Values() theta = 0.0 dtheta = 2 * pi / numPoses @@ -29,7 +26,7 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar=0): np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]], order='F')) for i in range(numPoses): key = gtsam.symbol(symbolChar, i) - gti = gtsam.Point3(radius * cos(theta), radius * sin(theta), 0) + gti = gtsam.Point3(radius * math.cos(theta), radius * math.sin(theta), 0) oRi = gtsam.Rot3.Yaw( -theta) # negative yaw goes counterclockwise, with Z down ! gTi = gtsam.Pose3(gRo.compose(oRi), gti) diff --git a/cython/gtsam/utils/logging_optimizer.py b/python/gtsam/utils/logging_optimizer.py similarity index 100% rename from cython/gtsam/utils/logging_optimizer.py rename to python/gtsam/utils/logging_optimizer.py diff --git a/cython/gtsam/utils/plot.py b/python/gtsam/utils/plot.py similarity index 81% rename from cython/gtsam/utils/plot.py rename to python/gtsam/utils/plot.py index 1e976a69e..b2c2ab879 100644 --- a/cython/gtsam/utils/plot.py +++ b/python/gtsam/utils/plot.py @@ -1,9 +1,11 @@ """Various plotting utlities.""" -import numpy as np +# pylint: disable=no-member, invalid-name + import matplotlib.pyplot as plt +import numpy as np from matplotlib import patches -from mpl_toolkits.mplot3d import Axes3D +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import import gtsam @@ -109,7 +111,7 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global t = pose.translation() - origin = np.array([t.x(), t.y()]) + origin = t # draw the camera axes x_axis = origin + gRp[:, 0] * 
axis_length @@ -155,7 +157,6 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, axes.set_xlabel(axis_labels[0]) axes.set_ylabel(axis_labels[1]) - axes.set_zlabel(axis_labels[2]) return fig @@ -170,9 +171,9 @@ def plot_point3_on_axes(axes, point, linespec, P=None): linespec (string): String representing formatting options for Matplotlib. P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. """ - axes.plot([point.x()], [point.y()], [point.z()], linespec) + axes.plot([point[0]], [point[1]], [point[2]], linespec) if P is not None: - plot_covariance_ellipse_3d(axes, point.vector(), P) + plot_covariance_ellipse_3d(axes, point, P) def plot_point3(fignum, point, linespec, P=None, @@ -222,9 +223,8 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None, keys = values.keys() # Plot points and covariance matrices - for i in range(keys.size()): + for key in keys: try: - key = keys.at(i) point = values.atPoint3(key) if marginals is not None: covariance = marginals.marginalCovariance(key) @@ -254,7 +254,7 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): """ # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global - origin = pose.translation().vector() + origin = pose.translation() # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -309,61 +309,84 @@ def plot_pose3(fignum, pose, axis_length=0.1, P=None, def plot_trajectory(fignum, values, scale=1, marginals=None, title="Plot Trajectory", axis_labels=('X axis', 'Y axis', 'Z axis')): """ - Plot a complete 3D trajectory using poses in `values`. + Plot a complete 2D/3D trajectory using poses in `values`. Args: fignum (int): Integer representing the figure number to use for plotting. - values (gtsam.Values): Values dict containing the poses. + values (gtsam.Values): Values containing some Pose2 and/or Pose3 values. scale (float): Value to scale the poses by. 
marginals (gtsam.Marginals): Marginalized probability values of the estimation. Used to plot uncertainty bounds. title (string): The title of the plot. axis_labels (iterable[string]): List of axis labels to set. """ - pose3Values = gtsam.utilities_allPose3s(values) - keys = gtsam.KeyVector(pose3Values.keys()) - lastIndex = None + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') - for i in range(keys.size()): - key = keys.at(i) - try: - pose = pose3Values.atPose3(key) - except: - print("Warning: no Pose3 at key: {0}".format(key)) + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) - if lastIndex is not None: - lastKey = keys.at(lastIndex) - try: - lastPose = pose3Values.atPose3(lastKey) - except: - print("Warning: no Pose3 at key: {0}".format(lastKey)) - pass + # Plot 2D poses, if any + poses = gtsam.utilities.allPose2s(values) + for key in poses.keys(): + pose = poses.atPose2(key) + if marginals: + covariance = marginals.marginalCovariance(key) + else: + covariance = None - if marginals: - covariance = marginals.marginalCovariance(lastKey) - else: - covariance = None + plot_pose2_on_axes(axes, pose, covariance=covariance, + axis_length=scale) - fig = plot_pose3(fignum, lastPose, P=covariance, - axis_length=scale, axis_labels=axis_labels) + # Then 3D poses, if any + poses = gtsam.utilities.allPose3s(values) + for key in poses.keys(): + pose = poses.atPose3(key) + if marginals: + covariance = marginals.marginalCovariance(key) + else: + covariance = None - lastIndex = i - - # Draw final pose - if lastIndex is not None: - lastKey = keys.at(lastIndex) - try: - lastPose = pose3Values.atPose3(lastKey) - if marginals: - covariance = marginals.marginalCovariance(lastKey) - else: - covariance = None - - fig = plot_pose3(fignum, lastPose, P=covariance, - axis_length=scale, axis_labels=axis_labels) - - except: - pass + plot_pose3_on_axes(axes, pose, P=covariance, + axis_length=scale) fig.suptitle(title) 
fig.canvas.set_window_title(title.lower()) + + +def plot_incremental_trajectory(fignum, values, start=0, + scale=1, marginals=None, + time_interval=0.0): + """ + Incrementally plot a complete 3D trajectory using poses in `values`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + values (gtsam.Values): Values dict containing the poses. + start (int): Starting index to start plotting from. + scale (float): Value to scale the poses by. + marginals (gtsam.Marginals): Marginalized probability values of the estimation. + Used to plot uncertainty bounds. + time_interval (float): Time in seconds to pause between each rendering. + Used to create animation effect. + """ + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') + + poses = gtsam.utilities.allPose3s(values) + keys = gtsam.KeyVector(poses.keys()) + + for key in keys[start:]: + if values.exists(key): + pose_i = values.atPose3(key) + plot_pose3(fignum, pose_i, scale) + + # Update the plot space to encompass all plotted points + axes.autoscale() + + # Set the 3 axes equal + set_axes_equal(fignum) + + # Pause for a fixed amount of seconds + plt.pause(time_interval) diff --git a/cython/gtsam/utils/test_case.py b/python/gtsam/utils/test_case.py similarity index 80% rename from cython/gtsam/utils/test_case.py rename to python/gtsam/utils/test_case.py index 7df1e6ee9..3effd7f65 100644 --- a/cython/gtsam/utils/test_case.py +++ b/python/gtsam/utils/test_case.py @@ -21,7 +21,11 @@ class GtsamTestCase(unittest.TestCase): Keyword Arguments: tol {float} -- tolerance passed to 'equals', default 1e-9 """ - equal = actual.equals(expected, tol) + import numpy + if isinstance(expected, numpy.ndarray): + equal = numpy.allclose(actual, expected, atol=tol) + else: + equal = actual.equals(expected, tol) if not equal: raise self.failureException( "Values are not equal:\n{}!={}".format(actual, expected)) diff --git a/cython/gtsam/utils/visual_data_generator.py 
b/python/gtsam/utils/visual_data_generator.py similarity index 79% rename from cython/gtsam/utils/visual_data_generator.py rename to python/gtsam/utils/visual_data_generator.py index f04588e70..32ccbc8fa 100644 --- a/cython/gtsam/utils/visual_data_generator.py +++ b/python/gtsam/utils/visual_data_generator.py @@ -1,9 +1,10 @@ from __future__ import print_function import numpy as np - +import math +from math import pi import gtsam -from gtsam import Cal3_S2, PinholeCameraCal3_S2, Point2, Point3, Pose3 +from gtsam import Point3, Pose3, PinholeCameraCal3_S2, Cal3_S2 class Options: @@ -31,7 +32,7 @@ class GroundTruth: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K self.cameras = [Pose3()] * nrCameras - self.points = [Point3()] * nrPoints + self.points = [Point3(0, 0, 0)] * nrPoints def print_(self, s=""): print(s) @@ -55,20 +56,20 @@ class Data: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.Z = [x[:] for x in [[Point2()] * nrPoints] * nrCameras] + self.Z = [x[:] for x in [[gtsam.Point2()] * nrPoints] * nrCameras] self.J = [x[:] for x in [[0] * nrPoints] * nrCameras] self.odometry = [Pose3()] * nrCameras # Set Noise parameters self.noiseModels = Data.NoiseModels() - self.noiseModels.posePrior = gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.posePrior = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])) - # noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + # noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( # np.array([0.001,0.001,0.001,0.1,0.1,0.1])) - self.noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 0.2, 0.2, 0.2])) - self.noiseModels.pointPrior = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - self.noiseModels.measurement = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) + self.noiseModels.pointPrior = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + self.noiseModels.measurement 
= gtsam.noiseModel.Isotropic.Sigma(2, 1.0) def generate_data(options): @@ -84,8 +85,8 @@ def generate_data(options): if options.triangle: # Create a triangle target, just 3 points on a plane r = 10 for j in range(len(truth.points)): - theta = j * 2 * np.pi / nrPoints - truth.points[j] = Point3(r * np.cos(theta), r * np.sin(theta), 0) + theta = j * 2 * pi / nrPoints + truth.points[j] = Point3(r * math.cos(theta), r * math.sin(theta), 0) else: # 3D landmarks as vertices of a cube truth.points = [ Point3(10, 10, 10), Point3(-10, 10, 10), @@ -98,10 +99,10 @@ def generate_data(options): height = 10 r = 40 for i in range(options.nrCameras): - theta = i * 2 * np.pi / options.nrCameras - t = Point3(r * np.cos(theta), r * np.sin(theta), height) + theta = i * 2 * pi / options.nrCameras + t = Point3(r * math.cos(theta), r * math.sin(theta), height) truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, - Point3(), + Point3(0, 0, 0), Point3(0, 0, 1), truth.K) # Create measurements diff --git a/cython/gtsam/utils/visual_isam.py b/python/gtsam/utils/visual_isam.py similarity index 88% rename from cython/gtsam/utils/visual_isam.py rename to python/gtsam/utils/visual_isam.py index b0ebe68c3..a8fed4b23 100644 --- a/cython/gtsam/utils/visual_isam.py +++ b/python/gtsam/utils/visual_isam.py @@ -25,7 +25,7 @@ def initialize(data, truth, options): newFactors = gtsam.NonlinearFactorGraph() initialEstimates = gtsam.Values() for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) if i == 0: if options.hardConstraint: # add hard constraint newFactors.add( @@ -41,10 +41,10 @@ def initialize(data, truth, options): # Add visual measurement factors from two first poses and initialize # observed landmarks for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) for k in range(len(data.Z[i])): j = data.J[i][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2(data.Z[i][ k], data.noiseModels.measurement, ii, jj, data.K)) @@ 
-59,8 +59,8 @@ def initialize(data, truth, options): # Add odometry between frames 0 and 1 newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), 0), - symbol(ord('x'), 1), data.odometry[1], data.noiseModels.odometry)) + symbol('x', 0), + symbol('x', 1), data.odometry[1], data.noiseModels.odometry)) # Update ISAM if options.batchInitialization: # Do a full optimize for first two poses @@ -98,28 +98,28 @@ def step(data, isam, result, truth, currPoseIndex): odometry = data.odometry[prevPoseIndex] newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), prevPoseIndex), - symbol(ord('x'), currPoseIndex), odometry, + symbol('x', prevPoseIndex), + symbol('x', currPoseIndex), odometry, data.noiseModels.odometry)) # Add visual measurement factors and initializations as necessary for k in range(len(data.Z[currPoseIndex])): zij = data.Z[currPoseIndex][k] j = data.J[currPoseIndex][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2( zij, data.noiseModels.measurement, - symbol(ord('x'), currPoseIndex), jj, data.K)) + symbol('x', currPoseIndex), jj, data.K)) # TODO: initialize with something other than truth if not result.exists(jj) and not initialEstimates.exists(jj): lmInit = truth.points[j] initialEstimates.insert(jj, lmInit) # Initial estimates for the new pose. 
- prevPose = result.atPose3(symbol(ord('x'), prevPoseIndex)) + prevPose = result.atPose3(symbol('x', prevPoseIndex)) initialEstimates.insert( - symbol(ord('x'), currPoseIndex), prevPose.compose(odometry)) + symbol('x', currPoseIndex), prevPose.compose(odometry)) # Update ISAM # figure(1)tic diff --git a/cython/gtsam_unstable/__init__.py b/python/gtsam_unstable/__init__.py similarity index 100% rename from cython/gtsam_unstable/__init__.py rename to python/gtsam_unstable/__init__.py diff --git a/cython/gtsam_unstable/examples/FixedLagSmootherExample.py b/python/gtsam_unstable/examples/FixedLagSmootherExample.py similarity index 84% rename from cython/gtsam_unstable/examples/FixedLagSmootherExample.py rename to python/gtsam_unstable/examples/FixedLagSmootherExample.py index 786701e0f..7d2cea8ae 100644 --- a/cython/gtsam_unstable/examples/FixedLagSmootherExample.py +++ b/python/gtsam_unstable/examples/FixedLagSmootherExample.py @@ -16,16 +16,6 @@ import numpy as np import gtsam import gtsam_unstable - -def _timestamp_key_value(key, value): - """ - - """ - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - def BatchFixedLagSmootherExample(): """ Runs a batch fixed smoother on an agent with two odometry @@ -45,21 +35,21 @@ def BatchFixedLagSmootherExample(): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current 
timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. Position is time[s] * 2[m/s] @@ -69,14 +59,14 @@ def BatchFixedLagSmootherExample(): # Add odometry factors from two different sources with different error # stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_1, odometry_noise_1 )) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_2, odometry_noise_2 diff --git a/cython/gtsam_unstable/examples/TimeOfArrivalExample.py b/python/gtsam_unstable/examples/TimeOfArrivalExample.py similarity index 98% rename from cython/gtsam_unstable/examples/TimeOfArrivalExample.py rename to python/gtsam_unstable/examples/TimeOfArrivalExample.py index 6ba06f0f2..59f008a05 100644 --- a/cython/gtsam_unstable/examples/TimeOfArrivalExample.py +++ b/python/gtsam_unstable/examples/TimeOfArrivalExample.py @@ -12,7 +12,7 @@ Author: Frank Dellaert # pylint: disable=invalid-name, no-name-in-module from gtsam import (LevenbergMarquardtOptimizer, LevenbergMarquardtParams, - NonlinearFactorGraph, Point3, Values, noiseModel_Isotropic) + NonlinearFactorGraph, Point3, Values, noiseModel) from gtsam_unstable import Event, TimeOfArrival, TOAFactor # units @@ -64,7 +64,7 @@ def create_graph(microphones, simulatedTOA): graph = NonlinearFactorGraph() # Create a noise model for the TOA error - model = noiseModel_Isotropic.Sigma(1, 0.5 * MS) + model = noiseModel.Isotropic.Sigma(1, 
0.5 * MS) K = len(microphones) key = 0 diff --git a/cython/gtsam/utils/__init__.py b/python/gtsam_unstable/examples/__init__.py similarity index 100% rename from cython/gtsam/utils/__init__.py rename to python/gtsam_unstable/examples/__init__.py diff --git a/python/gtsam_unstable/gtsam_unstable.tpl b/python/gtsam_unstable/gtsam_unstable.tpl new file mode 100644 index 000000000..1d9dfaa40 --- /dev/null +++ b/python/gtsam_unstable/gtsam_unstable.tpl @@ -0,0 +1,44 @@ +/** + * @file gtsam.cpp + * @brief The auto-generated wrapper C++ source code. + * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar + * @date Aug. 18, 2020 + * + * ** THIS FILE IS AUTO-GENERATED, DO NOT MODIFY! ** + */ + +// Include relevant boost libraries required by GTSAM +{include_boost} + +#include +#include +#include +#include "gtsam/base/serialization.h" +#include "gtsam/nonlinear/utilities.h" // for RedirectCout. + +// These are the included headers listed in `gtsam_unstable.i` +{includes} +#include + +{boost_class_export} + +{hoder_type} + +#include "python/gtsam_unstable/preamble.h" + +using namespace std; + +namespace py = pybind11; + +PYBIND11_MODULE({module_name}, m_) {{ + m_.doc() = "pybind11 wrapper of {module_name}"; + + // Note here we need to import the dependent library + py::module::import("gtsam"); + +{wrapped_namespace} + +#include "python/gtsam_unstable/specializations.h" + +}} + diff --git a/cython/gtsam_unstable/examples/__init__.py b/python/gtsam_unstable/preamble.h similarity index 100% rename from cython/gtsam_unstable/examples/__init__.py rename to python/gtsam_unstable/preamble.h diff --git a/cython/gtsam_unstable/tests/__init__.py b/python/gtsam_unstable/specializations.h similarity index 100% rename from cython/gtsam_unstable/tests/__init__.py rename to python/gtsam_unstable/specializations.h diff --git a/python/gtsam_unstable/tests/__init__.py b/python/gtsam_unstable/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py b/python/gtsam_unstable/tests/test_FixedLagSmootherExample.py similarity index 88% rename from cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py rename to python/gtsam_unstable/tests/test_FixedLagSmootherExample.py index 8d3af311f..c1ccd1ea1 100644 --- a/cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py +++ b/python/gtsam_unstable/tests/test_FixedLagSmootherExample.py @@ -16,13 +16,6 @@ import gtsam import gtsam_unstable from gtsam.utils.test_case import GtsamTestCase - -def _timestamp_key_value(key, value): - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - class TestFixedLagSmootherExample(GtsamTestCase): ''' Tests the fixed lag smoother wrapper @@ -47,14 +40,14 @@ class TestFixedLagSmootherExample(GtsamTestCase): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas( + prior_noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back( gtsam.PriorFactorPose2( X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 @@ -80,11 +73,11 @@ class TestFixedLagSmootherExample(GtsamTestCase): # and its two odometers measure the change. The smoothed # result is then compared to the ground truth while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * @@ -95,7 +88,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): # Add odometry factors from two different sources with different # error stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( @@ -105,7 +98,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): odometry_noise_1)) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 000000000..481d27d8e --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,2 @@ +numpy>=1.11.0 +pyparsing>=2.4.2 diff --git a/cython/setup.py.in b/python/setup.py.in similarity index 70% rename from cython/setup.py.in rename to python/setup.py.in index 98a05c9f6..55431a9ad 100644 --- a/cython/setup.py.in +++ b/python/setup.py.in @@ -1,25 +1,20 @@ import os import sys + try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup, find_packages -packages = find_packages() - +packages = find_packages(where=".") +print("PACKAGES: ", packages) package_data = { - package: - [f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')] - for package in packages + '': [ + './*.so', + './*.dll', + ] } -cython_install_requirements = open("${CYTHON_INSTALL_REQUIREMENTS_FILE}").readlines() - -install_requires = [line.strip() \ - for line in cython_install_requirements \ - if len(line.strip()) > 0 and not line.strip().startswith('#') -] - # Cleaner to read in the contents rather than copy them over. 
readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read() @@ -33,8 +28,6 @@ setup( license='Simplified BSD license', keywords='slam sam robotics localization mapping optimization', long_description=readme_contents, - long_description_content_type='text/markdown', - python_requires='>=2.7', # https://pypi.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Development Status :: 5 - Production/Stable', @@ -48,8 +41,9 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], - packages=packages, package_data=package_data, - install_requires=install_requires + test_suite="gtsam.tests", + install_requires=["numpy"], + zip_safe=False, ) diff --git a/tests/testNonlinearISAM.cpp b/tests/testNonlinearISAM.cpp index 974806612..4249f5bc4 100644 --- a/tests/testNonlinearISAM.cpp +++ b/tests/testNonlinearISAM.cpp @@ -308,7 +308,7 @@ TEST(testNonlinearISAM, loop_closures ) { // check if edge const auto betweenPose = parseEdge(is, tag); if (betweenPose) { - Key id1, id2; + size_t id1, id2; tie(id1, id2) = betweenPose->first; graph.emplace_shared >(Symbol('x', id2), Symbol('x', id1), betweenPose->second, model); diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 2616ab103..dc19801a2 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -541,7 +541,6 @@ TEST(NonlinearOptimizer, subclass_solver) { } /* ************************************************************************* */ -#include TEST( NonlinearOptimizer, logfile ) { NonlinearFactorGraph fg(example::createReallyNonlinearFactorGraph()); diff --git a/tests/testTranslationRecovery.cpp b/tests/testTranslationRecovery.cpp index 5a98c3bf5..eb34ba803 100644 --- a/tests/testTranslationRecovery.cpp +++ b/tests/testTranslationRecovery.cpp @@ -49,15 +49,17 @@ TEST(TranslationRecovery, BAL) { poses, {{0, 1}, {0, 2}, {1, 2}}); // Check - const Pose3 wTa = poses.at(0), wTb = poses.at(1), - wTc = poses.at(2); - const Point3 Ta = 
wTa.translation(), Tb = wTb.translation(), - Tc = wTc.translation(); - const Rot3 aRw = wTa.rotation().inverse(); - const Unit3 w_aZb = relativeTranslations.at({0, 1}); - EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb)); - const Unit3 w_aZc = relativeTranslations.at({0, 2}); - EXPECT(assert_equal(Unit3(Tc - Ta), w_aZc)); + Unit3 w_aZb_stored; // measurement between 0 and 1 stored for next unit test + for(auto& unitTranslation : relativeTranslations) { + const Pose3 wTa = poses.at(unitTranslation.key1()), + wTb = poses.at(unitTranslation.key2()); + const Point3 Ta = wTa.translation(), Tb = wTb.translation(); + const Unit3 w_aZb = unitTranslation.measured(); + EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb)); + if(unitTranslation.key1() == 0 && unitTranslation.key2() == 1) { + w_aZb_stored = unitTranslation.measured(); + } + } TranslationRecovery algorithm(relativeTranslations); const auto graph = algorithm.buildGraph(); @@ -69,10 +71,14 @@ TEST(TranslationRecovery, BAL) { // Check result for first two translations, determined by prior EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); - EXPECT(assert_equal(Point3(2 * w_aZb.point3()), result.at(1))); + EXPECT(assert_equal(Point3(2 * w_aZb_stored.point3()), result.at(1))); // Check that the third translations is correct - Point3 expected = (Tc - Ta) * (scale / (Tb - Ta).norm()); + Point3 Ta = poses.at(0).translation(); + Point3 Tb = poses.at(1).translation(); + Point3 Tc = poses.at(2).translation(); + Point3 expected = + (Tc - Ta) * (scale / (Tb - Ta).norm()); EXPECT(assert_equal(expected, result.at(2), 1e-4)); // TODO(frank): how to get stats back? 
diff --git a/timing/timeFrobeniusFactor.cpp b/timing/timeShonanFactor.cpp similarity index 82% rename from timing/timeFrobeniusFactor.cpp rename to timing/timeShonanFactor.cpp index 8bdda968f..207d54a4d 100644 --- a/timing/timeFrobeniusFactor.cpp +++ b/timing/timeShonanFactor.cpp @@ -10,8 +10,8 @@ * -------------------------------------------------------------------------- */ /** - * @file timeFrobeniusFactor.cpp - * @brief time FrobeniusFactor with BAL file + * @file timeShonanFactor.cpp + * @brief time ShonanFactor with BAL file * @author Frank Dellaert * @date 2019 */ @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include @@ -42,7 +42,7 @@ static SharedNoiseModel gNoiseModel = noiseModel::Unit::Create(2); int main(int argc, char* argv[]) { // primitive argument parsing: if (argc > 3) { - throw runtime_error("Usage: timeFrobeniusFactor [g2oFile]"); + throw runtime_error("Usage: timeShonanFactor [g2oFile]"); } string g2oFile; @@ -57,8 +57,8 @@ int main(int argc, char* argv[]) { } // Read G2O file - const auto factors = parse3DFactors(g2oFile); - const auto poses = parse3DPoses(g2oFile); + const auto measurements = parseMeasurements(g2oFile); + const auto poses = parseVariables(g2oFile); // Build graph NonlinearFactorGraph graph; @@ -66,12 +66,12 @@ int main(int argc, char* argv[]) { auto priorModel = noiseModel::Isotropic::Sigma(6, 10000); graph.add(PriorFactor(0, SOn::identity(4), priorModel)); auto G = boost::make_shared(SOn::VectorizedGenerators(4)); - for (const auto& factor : factors) { - const auto& keys = factor->keys(); - const auto& Tij = factor->measured(); - const auto& model = factor->noiseModel(); - graph.emplace_shared( - keys[0], keys[1], Rot3(Tij.rotation().matrix()), 4, model, G); + for (const auto &m : measurements) { + const auto &keys = m.keys(); + const Rot3 &Rij = m.measured(); + const auto &model = m.noiseModel(); + graph.emplace_shared( + keys[0], keys[1], Rij, 4, model, G); } std::mt19937 rng(42); diff --git 
a/update_wrap.sh b/update_wrap.sh new file mode 100755 index 000000000..61f55e32e --- /dev/null +++ b/update_wrap.sh @@ -0,0 +1,13 @@ +#!/bin/bash +REF=${1-master} # branch or tag; defaults to 'master' if parameter 1 not present +REMOTE=wrap # just a name to identify the remote +REPO=git@github.com:borglab/wrap.git # replace this with your repository URL +FOLDER=wrap # where to mount the subtree + +git remote add $REMOTE --no-tags $REPO +if [[ -d $FOLDER ]]; then # update the existing subtree + git subtree pull $REMOTE $REF --prefix=$FOLDER --squash -m "Merging '$REF' into '$FOLDER'" +else # add the subtree + git subtree add $REMOTE $REF --prefix=$FOLDER --squash -m "Merging '$REF' into '$FOLDER'" +fi +git remote remove $REMOTE \ No newline at end of file diff --git a/wrap/.github/workflows/ci.yml b/wrap/.github/workflows/ci.yml new file mode 100644 index 000000000..2e38bc3dd --- /dev/null +++ b/wrap/.github/workflows/ci.yml @@ -0,0 +1,52 @@ +name: Python CI + +on: [push, pull_request] + +jobs: + build: + name: ${{ matrix.name }} 🐍 ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + PYTHON_VERSION: ${{ matrix.python_version }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + ubuntu-18.04 + ] + + python_version: [3] + include: + - name: ubuntu-18.04 + os: ubuntu-18.04 + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + sudo pip$PYTHON_VERSION install -r requirements.txt + cd tests + python$PYTHON_VERSION test_pybind_wrapper.py + python$PYTHON_VERSION test_matlab_wrapper.py + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + pip$PYTHON_VERSION install -r requirements.txt + cd tests + python$PYTHON_VERSION test_pybind_wrapper.py + python$PYTHON_VERSION test_matlab_wrapper.py \ No newline at end of file diff --git a/wrap/.gitignore b/wrap/.gitignore new file mode 100644 index 000000000..38da6d9d1 --- /dev/null +++ b/wrap/.gitignore @@ -0,0 +1,2 @@ +__pycache__/ +.vscode/ diff --git a/wrap/Argument.cpp b/wrap/Argument.cpp deleted file mode 100644 index f85aed72e..000000000 --- a/wrap/Argument.cpp +++ /dev/null @@ -1,316 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Argument.h" -#include "Class.h" - -#include - -#include -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -Argument Argument::expandTemplate(const TemplateSubstitution& ts) const { - Argument instArg = *this; - instArg.type = ts.tryToSubstitite(type); - return instArg; -} - -/* ************************************************************************* */ -ArgumentList ArgumentList::expandTemplate( - const TemplateSubstitution& ts) const { - ArgumentList instArgList; - for(const Argument& arg: *this) { - Argument instArg = arg.expandTemplate(ts); - instArgList.push_back(instArg); - } - return instArgList; -} - -/* ************************************************************************* */ -string Argument::matlabClass(const string& delim) const { - string result; - for(const string& ns: type.namespaces()) - result += ns + delim; - if (type.name() == "string" || type.name() == "unsigned char" - || type.name() == "char") - return result + "char"; - if (type.name() == "Vector" || type.name() == "Matrix") - return result + "double"; - if (type.name() == "int" || type.name() == "size_t") - return result + "numeric"; - if (type.name() == "bool") - return result + "logical"; - return result + type.name(); -} - -/* ************************************************************************* */ -void Argument::matlab_unwrap(FileWriter& file, const string& matlabName) const { - file.oss << " "; - - string cppType = type.qualifiedName("::"); - string matlabUniqueType = type.qualifiedName(); - bool isNotScalar = !type.isScalar(); - - // We cannot handle scalar non const references - if 
(!isNotScalar && is_ref && !is_const) { - throw std::runtime_error("Cannot unwrap a scalar non-const reference"); - } - - if (is_ptr && type.category != Qualified::EIGEN) - // A pointer: emit an "unwrap_shared_ptr" call which returns a pointer - file.oss << "boost::shared_ptr<" << cppType << "> " << name - << " = unwrap_shared_ptr< "; - else if (is_ref && isNotScalar && type.category != Qualified::EIGEN) - // A reference: emit an "unwrap_shared_ptr" call and de-reference the pointer - file.oss << cppType << "& " << name << " = *unwrap_shared_ptr< "; - else - // Not a pointer, or a reference to a scalar type. Therefore, emit an "unwrap" call - // unwrap is specified in matlab.h as a series of template specializations - // that know how to unpack the expected MATLAB object - // example: double tol = unwrap< double >(in[2]); - // example: Vector v = unwrap< Vector >(in[1]); - file.oss << cppType << " " << name << " = unwrap< "; - - file.oss << cppType << " >(" << matlabName; - if( (is_ptr || is_ref) && isNotScalar && type.category != Qualified::EIGEN) - file.oss << ", \"ptr_" << matlabUniqueType << "\""; - file.oss << ");" << endl; -} - -/* ************************************************************************* */ -void Argument::proxy_check(FileWriter& proxyFile, const string& s) const { - proxyFile.oss << "isa(" << s << ",'" << matlabClass(".") << "')"; - if (type.name() == "Vector") - proxyFile.oss << " && size(" << s << ",2)==1"; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType = type.pxdClassName(); - if (cythonType == "This") cythonType = className; - else if (type.isEigen()) - cythonType = "const " + cythonType + "&"; - else if (type.match(templateArgs)) - cythonType = type.name(); - - // add modifier - if (!type.isEigen()) { - if (is_ptr) cythonType = "shared_ptr[" + cythonType 
+ "]&"; - if (is_ref) cythonType = cythonType + "&"; - if (is_const) cythonType = "const " + cythonType; - } - - file.oss << cythonType << " " << name; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pyx(FileWriter& file) const { - file.oss << type.pyxArgumentType() << " " << name; -} - -/* ************************************************************************* */ -std::string Argument::pyx_convertEigenTypeAndStorageOrder() const { - if (!type.isEigen()) - return ""; - return name + " = " + name + ".astype(float, order=\'F\', copy=False)"; -} - -/* ************************************************************************* */ -std::string Argument::pyx_asParam() const { - string cythonType = type.pxdClassName(); - string cythonVar; - if (type.isNonBasicType()) { - cythonVar = name + "." + type.shared_pxd_obj_in_pyx(); - if (!is_ptr) cythonVar = "deref(" + cythonVar + ")"; - } else if (type.isEigen()) { - cythonVar = "<" + cythonType + ">" + "(Map[" + cythonType + "](" + name + "))"; - } else { - cythonVar = name; - } - return cythonVar; -} - -/* ************************************************************************* */ -string ArgumentList::types() const { - string str; - bool first = true; - for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.type.name(); - first = false; - } - return str; -} - -/* ************************************************************************* */ -string ArgumentList::signature() const { - string sig; - bool cap = false; - - for(Argument arg: *this) { - for(char ch: arg.type.name()) - if (isupper(ch)) { - sig += ch; - //If there is a capital letter, we don't want to read it below - cap = true; - } - if (!cap) - sig += arg.type.name()[0]; - //Reset to default - cap = false; - } - - return sig; -} - -/* ************************************************************************* */ -string ArgumentList::names() const { - string str; - bool first = true; - 
for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.name; - first = false; - } - return str; -} - -/* ************************************************************************* */ -bool ArgumentList::allScalar() const { - for(Argument arg: *this) - if (!arg.type.isScalar()) - return false; - return true; -} - -/* ************************************************************************* */ -void ArgumentList::matlab_unwrap(FileWriter& file, int start) const { - int index = start; - for(Argument arg: *this) { - stringstream buf; - buf << "in[" << index << "]"; - arg.matlab_unwrap(file, buf.str()); - index++; - } -} - -/* ************************************************************************* */ -void ArgumentList::emit_prototype(FileWriter& file, const string& name) const { - file.oss << name << "("; - bool first = true; - for(Argument arg: *this) { - if (!first) - file.oss << ", "; - file.oss << arg.type.name() << " " << arg.name; - first = false; - } - file.oss << ")"; -} - -/* ************************************************************************* */ -void ArgumentList::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - for (size_t j = 0; j(__params[" + std::to_string(j) + "])\n"; - return s; -} - -/* ************************************************************************* */ -void ArgumentList::proxy_check(FileWriter& proxyFile) const { - // Check nr of arguments - proxyFile.oss << "if length(varargin) == " << size(); - if (size() > 0) - proxyFile.oss << " && "; - // ...and their type.names - bool first = true; - for (size_t i = 0; i < size(); i++) { - if (!first) - proxyFile.oss << " && "; - string s = "varargin{" + boost::lexical_cast(i + 1) + "}"; - (*this)[i].proxy_check(proxyFile, s); - first = false; - } - proxyFile.oss << "\n"; -} - -/* ************************************************************************* */ - diff --git a/wrap/Argument.h b/wrap/Argument.h deleted file 
mode 100644 index c08eb0be9..000000000 --- a/wrap/Argument.h +++ /dev/null @@ -1,242 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.h - * @brief arguments to constructors and methods - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "ReturnValue.h" - -namespace wrap { - -/// Argument class -struct Argument { - Qualified type; - std::string name; - bool is_const, is_ref, is_ptr; - - Argument() : - is_const(false), is_ref(false), is_ptr(false) { - } - - Argument(const Qualified& t, const std::string& n) : - type(t), name(n), is_const(false), is_ref(false), is_ptr(false) { - } - - bool isSameSignature(const Argument& other) const { - return type == other.type - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - bool operator==(const Argument& other) const { - return type == other.type && name == other.name - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - Argument expandTemplate(const TemplateSubstitution& ts) const; - - /// return MATLAB class for use in isa(x,class) - std::string matlabClass(const std::string& delim = "") const; - - /// MATLAB code generation, MATLAB to C++ - void matlab_unwrap(FileWriter& file, const std::string& matlabName) const; - - /** - * emit checking argument to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile, const std::string& s) const; - - /** - * emit arguments for cython pxd - * @param file output 
stream - */ - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParam() const; - std::string pyx_convertEigenTypeAndStorageOrder() const; - - friend std::ostream& operator<<(std::ostream& os, const Argument& arg) { - os << (arg.is_const ? "const " : "") << arg.type << (arg.is_ptr ? "*" : "") - << (arg.is_ref ? "&" : ""); - return os; - } - -}; - -/// Argument list is just a container with Arguments -struct ArgumentList: public std::vector { - - /// create a comma-separated string listing all argument types (not used) - std::string types() const; - - /// create a short "signature" string - std::string signature() const; - - /// create a comma-separated string listing all argument names, used in m-files - std::string names() const; - - /// Check if all arguments scalar - bool allScalar() const; - - ArgumentList expandTemplate(const TemplateSubstitution& ts) const; - - bool isSameSignature(const ArgumentList& other) const { - for(size_t i = 0; i& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParams() const; - std::string pyx_paramsList() const; - std::string pyx_castParamsToPythonType(const std::string& indent) const; - std::string pyx_convertEigenTypeAndStorageOrder(const std::string& indent) const; - - /** - * emit checking arguments to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile) const; - - /// Output stream operator - friend std::ostream& operator<<(std::ostream& os, - const ArgumentList& argList) { - os << "("; - if (argList.size() > 0) - os << argList.front(); - if (argList.size() > 1) - for (size_t i = 1; i < argList.size(); i++) - os << ", " << argList[i]; - os << ")"; - return os; - } - -}; - -/* ************************************************************************* */ -// 
http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentGrammar: public classic::grammar { - - wrap::Argument& result_; ///< successful parse will be placed in here - TypeGrammar argument_type_g; ///< Type parser for Argument::type - - /// Construct type grammar and specify where result is placed - ArgumentGrammar(wrap::Argument& result) : - result_(result), argument_type_g(result.type) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule argument_p; - - definition(ArgumentGrammar const& self) { - using namespace classic; - - // NOTE: allows for pointers to all types - // Slightly more permissive than before on basis/eigen type qualification - // Also, currently parses Point2*&, can't make it work otherwise :-( - argument_p = !str_p("const")[assign_a(self.result_.is_const, T)] // - >> self.argument_type_g // - >> !ch_p('*')[assign_a(self.result_.is_ptr, T)] - >> !ch_p('&')[assign_a(self.result_.is_ref, T)] - >> BasicRules::name_p[assign_a(self.result_.name)]; - } - - Rule const& start() const { - return argument_p; - } - - }; -}; -// ArgumentGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentListGrammar: public classic::grammar { - - wrap::ArgumentList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - ArgumentListGrammar(wrap::ArgumentList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - const Argument arg0; ///< used to reset arg - Argument arg; ///< temporary argument for use during parsing - ArgumentGrammar argument_g; ///< single Argument parser - - classic::rule argument_p, argumentList_p; - - definition(ArgumentListGrammar const& self) : - argument_g(arg) { - using namespace classic; - - 
argument_p = argument_g // - [classic::push_back_a(self.result_, arg)] // - [assign_a(arg, arg0)]; - - argumentList_p = '(' >> !argument_p >> *(',' >> argument_p) >> ')'; - } - - classic::rule const& start() const { - return argumentList_p; - } - - }; -}; -// ArgumentListGrammar - -/* ************************************************************************* */ - -}// \namespace wrap - diff --git a/wrap/CMakeLists.txt b/wrap/CMakeLists.txt deleted file mode 100644 index c04a44edb..000000000 --- a/wrap/CMakeLists.txt +++ /dev/null @@ -1,45 +0,0 @@ -# Build/install Wrap - -set(WRAP_BOOST_LIBRARIES - Boost::system - Boost::filesystem - Boost::thread -) - -# Allow for disabling serialization to handle errors related to Clang's linker -option(GTSAM_WRAP_SERIALIZATION "If enabled, allows for wrapped objects to be saved via boost.serialization" ON) - -# Build the executable itself -file(GLOB wrap_srcs "*.cpp") -file(GLOB wrap_headers "*.h") -list(REMOVE_ITEM wrap_srcs ${CMAKE_CURRENT_SOURCE_DIR}/wrap.cpp) -add_library(wrap_lib STATIC ${wrap_srcs} ${wrap_headers}) -target_include_directories(wrap_lib PUBLIC - $ -) -if (NOT GTSAM_WRAP_SERIALIZATION) - target_compile_definitions(wrap_lib PUBLIC -DWRAP_DISABLE_SERIALIZE) -endif() - -# Apply build flags: -gtsam_apply_build_flags(wrap_lib) - -target_link_libraries(wrap_lib ${WRAP_BOOST_LIBRARIES}) -gtsam_assign_source_folders(${wrap_srcs} ${wrap_headers}) -add_executable(wrap wrap.cpp) -target_link_libraries(wrap PRIVATE wrap_lib) - -# Set folder in Visual Studio -file(RELATIVE_PATH relative_path "${PROJECT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}") -set_target_properties(wrap_lib wrap PROPERTIES FOLDER "${relative_path}") - -# Install wrap binary and export target -install(TARGETS wrap EXPORT GTSAM-exports DESTINATION ${CMAKE_INSTALL_BINDIR}) -list(APPEND GTSAM_EXPORTED_TARGETS wrap) -set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) - -# Install matlab header -install(FILES matlab.h DESTINATION 
${CMAKE_INSTALL_INCLUDEDIR}/wrap) - -# Build tests -add_subdirectory(tests) diff --git a/wrap/Class.cpp b/wrap/Class.cpp deleted file mode 100644 index 65ce9eab7..000000000 --- a/wrap/Class.cpp +++ /dev/null @@ -1,897 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.cpp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Class.h" -#include "utilities.h" -#include "Argument.h" -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include // std::ostream_iterator -//#include // on Linux GCC: fails with error regarding needing C++0x std flags -//#include // same failure as above -#include // works on Linux GCC -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void Class::assignParent(const Qualified& parent) { - parentClass.reset(parent); -} - -/* ************************************************************************* */ -boost::optional Class::qualifiedParent() const { - boost::optional result = boost::none; - if (parentClass) - result = parentClass->qualifiedName("::"); - return result; -} - -/* ************************************************************************* */ -static void handleException(const out_of_range& oor, - const Class::Methods& methods) { - cerr << "Class::method: key not found: " << oor.what() << ", methods are:\n"; - using boost::adaptors::map_keys; - ostream_iterator out_it(cerr, "\n"); - boost::copy(methods | map_keys, out_it); -} - -/* 
************************************************************************* */ -// Method& Class::mutableMethod(Str key) { -// try { -// return methods_.at(key); -// } catch (const out_of_range& oor) { -// handleException(oor, methods_); -// throw runtime_error("Internal error in wrap"); -// } -// } - -/* ************************************************************************* */ -const Method& Class::method(Str key) const { - try { - return methods_.at(key); - } catch (const out_of_range& oor) { - handleException(oor, methods_); - throw runtime_error("Internal error in wrap"); - } -} - -/* ************************************************************************* */ -void Class::matlab_proxy(Str toolboxPath, Str wrapperName, - const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile, - vector& functionNames) const { - - // Create namespace folders - createNamespaceStructure(namespaces(), toolboxPath); - - // open destination classFile - string classFile = matlabName(toolboxPath); - FileWriter proxyFile(classFile, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - // emit class proxy code - // we want our class to inherit the handle class for memory purposes - const string parent = - parentClass ? parentClass->qualifiedName(".") : "handle"; - comment_fragment(proxyFile); - proxyFile.oss << "classdef " << name() << " < " << parent << endl; - proxyFile.oss << " properties\n"; - proxyFile.oss << " ptr_" << matlabUniqueName << " = 0\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " methods\n"; - - // Constructor - proxyFile.oss << " function obj = " << name() << "(varargin)\n"; - // Special pointer constructors - one in MATLAB to create an object and - // assign a pointer returned from a C++ function. 
In turn this MATLAB - // constructor calls a special C++ function that just adds the object to - // its collector. This allows wrapped functions to return objects in - // other wrap modules - to add these to their collectors the pointer is - // passed from one C++ module into matlab then back into the other C++ - // module. - pointer_constructor_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - wrapperFile.oss << "\n"; - - // Regular constructors - boost::optional cppBaseName = qualifiedParent(); - for (size_t i = 0; i < constructor.nrOverloads(); i++) { - ArgumentList args = constructor.argumentList(i); - const int id = (int) functionNames.size(); - constructor.proxy_fragment(proxyFile, wrapperName, (bool) parentClass, id, - args); - const string wrapFunctionName = constructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, cppBaseName, id, args); - wrapperFile.oss << "\n"; - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss << " error('Arguments do not match any overload of " - << matlabQualName << " constructor');\n"; - proxyFile.oss << " end\n"; - if (parentClass) - proxyFile.oss << " obj = obj@" << parentClass->qualifiedName(".") - << "(uint64(" << ptr_constructor_key << "), base_ptr);\n"; - proxyFile.oss << " obj.ptr_" << matlabUniqueName << " = my_ptr;\n"; - proxyFile.oss << " end\n\n"; - - // Deconstructor - { - const int id = (int) functionNames.size(); - deconstructor.proxy_fragment(proxyFile, wrapperName, matlabUniqueName, id); - proxyFile.oss << "\n"; - const string functionName = deconstructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, id); - wrapperFile.oss << "\n"; - functionNames.push_back(functionName); - } - proxyFile.oss - << " function display(obj), obj.print(''); end\n %DISPLAY Calls print on the object\n"; - proxyFile.oss - << " function disp(obj), obj.display; end\n %DISP Calls print on the object\n"; - - // Methods - for(const Methods::value_type& name_m: 
methods_) { - const Method& m = name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - serialization_fragments(proxyFile, wrapperFile, wrapperName, functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "\n"; - proxyFile.oss << " methods(Static = true)\n"; - - // Static methods - for(const StaticMethods::value_type& name_m: static_methods) { - const StaticMethod& m = name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - deserialization_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "end\n"; - - // Close file - proxyFile.emit(true); -} - -/* ************************************************************************* */ -void Class::pointer_constructor_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - const int collectorInsertId = (int) functionNames.size(); - const string collectorInsertFunctionName = matlabUniqueName - + "_collectorInsertAndMakeBase_" - + boost::lexical_cast(collectorInsertId); - functionNames.push_back(collectorInsertFunctionName); - - int upcastFromVoidId; - string upcastFromVoidFunctionName; - if (isVirtual) { - upcastFromVoidId = (int) functionNames.size(); - upcastFromVoidFunctionName = matlabUniqueName + "_upcastFromVoid_" - + boost::lexical_cast(upcastFromVoidId); - functionNames.push_back(upcastFromVoidFunctionName); - } - - // MATLAB constructor that assigns pointer to matlab object then calls c++ - // function to add the object to the 
collector. - if (isVirtual) { - proxyFile.oss - << " if (nargin == 2 || (nargin == 3 && strcmp(varargin{3}, 'void')))"; - } else { - proxyFile.oss << " if nargin == 2"; - } - proxyFile.oss << " && isa(varargin{1}, 'uint64') && varargin{1} == uint64(" - << ptr_constructor_key << ")\n"; - if (isVirtual) { - proxyFile.oss << " if nargin == 2\n"; - proxyFile.oss << " my_ptr = varargin{2};\n"; - proxyFile.oss << " else\n"; - proxyFile.oss << " my_ptr = " << wrapperName << "(" - << upcastFromVoidId << ", varargin{2});\n"; - proxyFile.oss << " end\n"; - } else { - proxyFile.oss << " my_ptr = varargin{2};\n"; - } - if (!parentClass) // If this class has a base class, we'll get a base class pointer back - proxyFile.oss << " "; - else - proxyFile.oss << " base_ptr = "; - proxyFile.oss << wrapperName << "(" << collectorInsertId << ", my_ptr);\n"; // Call collector insert and get base class ptr - - // C++ function to add pointer from MATLAB to collector. The pointer always - // comes from a C++ return value; this mechanism allows the object to be added - // to a collector in a different wrap module. If this class has a base class, - // a new pointer to the base class is allocated and returned. 
- wrapperFile.oss << "void " << collectorInsertFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - wrapperFile.oss << " typedef boost::shared_ptr<" << cppName << "> Shared;\n"; - wrapperFile.oss << "\n"; - // Get self pointer passed in - wrapperFile.oss - << " Shared *self = *reinterpret_cast (mxGetData(in[0]));\n"; - // Add to collector - wrapperFile.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - // If we have a base class, return the base class pointer (MATLAB will call the base class collectorInsertAndMakeBase to add this to the collector and recurse the heirarchy) - boost::optional cppBaseName = qualifiedParent(); - if (cppBaseName) { - wrapperFile.oss << "\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << *cppBaseName - << "> SharedBase;\n"; - wrapperFile.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n"; - wrapperFile.oss - << " *reinterpret_cast(mxGetData(out[0])) = new SharedBase(*self);\n"; - } - wrapperFile.oss << "}\n"; - - // If this is a virtual function, C++ function to dynamic upcast it from a - // shared_ptr. This mechanism allows automatic dynamic creation of the - // real underlying derived-most class when a C++ method returns a virtual - // base class. 
- if (isVirtual) - wrapperFile.oss << "\n" - "void " << upcastFromVoidFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[]) {\n" - " mexAtExit(&_deleteAllObjects);\n" - " typedef boost::shared_ptr<" << cppName - << "> Shared;\n" - " boost::shared_ptr *asVoid = *reinterpret_cast**> (mxGetData(in[0]));\n" - " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n" - " Shared *self = new Shared(boost::static_pointer_cast<" << cppName - << ">(*asVoid));\n" - " *reinterpret_cast(mxGetData(out[0])) = self;\n" - "}\n"; -} - -/* ************************************************************************* */ -Class Class::expandTemplate(const TemplateSubstitution& ts) const { - Class inst = *this; - inst.methods_ = expandMethodTemplate(methods_, ts); - inst.static_methods = expandMethodTemplate(static_methods, ts); - inst.constructor = constructor.expandTemplate(ts); - inst.deconstructor.name = inst.name(); - return inst; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& instantiations) const { - vector result; - for(const Qualified& instName: instantiations) { - Qualified expandedClass = (Qualified) (*this); - expandedClass.expand(instName.name()); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName.qualifiedName("::") - + ">"; - inst.templateInstTypeList.push_back(instName); - inst.templateClass = *this; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& integers) const { - vector result; - for(int i: integers) { - Qualified expandedClass = (Qualified) (*this); - stringstream ss; ss << i; - string instName = ss.str(); - 
expandedClass.expand(instName); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName + ">"; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -void Class::addMethod(bool verbose, bool is_const, Str methodName, - const ArgumentList& argumentList, - const ReturnValue& returnValue, const Template& tmplate) { - // Check if templated - if (tmplate.valid()) { - try { - templateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - tmplate.argName(), verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - // Create method to expand - // For all values of the template argument, create a new method - for (const Qualified& instName : tmplate.argValues()) { - const TemplateSubstitution ts(tmplate.argName(), instName, *this); - // substitute template in arguments - ArgumentList expandedArgs = argumentList.expandTemplate(ts); - // do the same for return type - ReturnValue expandedRetVal = returnValue.expandTemplate(ts); - // Now stick in new overload stack with expandedMethodName key - // but note we use the same, unexpanded methodName in overload - string expandedMethodName = methodName + instName.name(); - try { - methods_[expandedMethodName].addOverload(methodName, expandedArgs, - expandedRetVal, is_const, - instName, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + expandedMethodName + "\n" + e.what()); - } - } - } else { - try { - // just add overload - methods_[methodName].addOverload(methodName, argumentList, returnValue, - is_const, boost::none, verbose); - 
nontemplateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - boost::none, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - } -} - -/* ************************************************************************* */ -void Class::erase_serialization(Methods& methods) { - Methods::iterator it = methods.find("serializable"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; -#else - // cout << "Ignoring serializable() flag in class " << name << endl; -#endif - methods.erase(it); - } - - it = methods.find("serialize"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; - hasSerialization = true; -#else - // cout << "Ignoring serialize() flag in class " << name << endl; -#endif - methods.erase(it); - } -} - -void Class::erase_serialization() { - erase_serialization(methods_); - erase_serialization(nontemplateMethods_); -} - -/* ************************************************************************* */ -void Class::verifyAll(vector& validTypes, bool& hasSerialiable) const { - - hasSerialiable |= isSerializable; - - // verify all of the function arguments - //TODO:verifyArguments(validTypes, constructor.args_list); - verifyArguments(validTypes, static_methods); - verifyArguments(validTypes, methods_); - - // verify function return types - verifyReturnTypes(validTypes, static_methods); - verifyReturnTypes(validTypes, methods_); - - // verify parents - boost::optional parent = qualifiedParent(); - if (parent - && find(validTypes.begin(), validTypes.end(), *parent) - == validTypes.end()) - throw DependencyMissing(*parent, qualifiedName("::")); -} - -/* ************************************************************************* */ -void Class::appendInheritedMethods(const Class& cls, - const vector& classes) { - - if (cls.parentClass) { - - // Find parent - 
for(const Class& parent: classes) { - // We found a parent class for our parent, TODO improve ! - if (parent.name() == cls.parentClass->name()) { - methods_.insert(parent.methods_.begin(), parent.methods_.end()); - appendInheritedMethods(parent, classes); - } - } - } -} - -/* ************************************************************************* */ -void Class::removeInheritedNontemplateMethods(vector& classes) { - if (!parentClass) return; - // Find parent - auto parentIt = std::find_if(classes.begin(), classes.end(), - [&](const Class& cls) { return cls.name() == parentClass->name(); }); - if (parentIt == classes.end()) return; // ignore if parent not found - Class& parent = *parentIt; - - // Only check nontemplateMethods_ - for(const string& methodName: nontemplateMethods_ | boost::adaptors::map_keys) { - // check if the method exists in its parent - // Check against parent's methods_ because all the methods of grand - // parent and grand-grand-parent, etc. are already included there - // This is to avoid looking into higher level grand parents... - auto it = parent.methods_.find(methodName); - if (it == parent.methods_.end()) continue; // if not: ignore! 
- - Method& parentMethod = it->second; - Method& method = nontemplateMethods_[methodName]; - // check if they have the same modifiers (const/static/templateArgs) - if (!method.isSameModifiers(parentMethod)) continue; // if not: ignore - - // check and remove duplicate overloads - auto methodOverloads = boost::combine(method.returnVals_, method.argLists_); - auto parentMethodOverloads = boost::combine(parentMethod.returnVals_, parentMethod.argLists_); - auto result = boost::remove_if( - methodOverloads, - [&](boost::tuple const& overload) { - bool found = std::find_if( - parentMethodOverloads.begin(), - parentMethodOverloads.end(), - [&](boost::tuple const& - parentOverload) { - return overload.get<0>() == parentOverload.get<0>() && - overload.get<1>().isSameSignature(parentOverload.get<1>()); - }) != parentMethodOverloads.end(); - return found; - }); - // remove all duplicate overloads - method.returnVals_.erase(boost::get<0>(result.get_iterator_tuple()), - method.returnVals_.end()); - method.argLists_.erase(boost::get<1>(result.get_iterator_tuple()), - method.argLists_.end()); - } - // [Optional] remove the entire method if it has no overload - for (auto it = nontemplateMethods_.begin(), ite = nontemplateMethods_.end(); it != ite;) - if (it->second.nrOverloads() == 0) it = nontemplateMethods_.erase(it); else ++it; -} - -/* ************************************************************************* */ -string Class::getTypedef() const { - string result; - for(Str namesp: namespaces()) { - result += ("namespace " + namesp + " { "); - } - result += ("typedef " + typedefName + " " + name() + ";"); - for (size_t i = 0; i < namespaces().size(); ++i) { - result += " }"; - } - return result; -} - -/* ************************************************************************* */ -void Class::comment_fragment(FileWriter& proxyFile) const { - proxyFile.oss << "%class " << name() << ", see Doxygen page for details\n"; - proxyFile.oss - << "%at 
http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - - constructor.comment_fragment(proxyFile); - - if (!methods_.empty()) - proxyFile.oss << "%\n%-------Methods-------\n"; - for(const Methods::value_type& name_m: methods_) - name_m.second.comment_fragment(proxyFile); - - if (!static_methods.empty()) - proxyFile.oss << "%\n%-------Static Methods-------\n"; - for(const StaticMethods::value_type& name_m: static_methods) - name_m.second.comment_fragment(proxyFile); - - if (hasSerialization) { - proxyFile.oss << "%\n%-------Serialization Interface-------\n"; - proxyFile.oss << "%string_serialize() : returns string\n"; - proxyFile.oss << "%string_deserialize(string serialized) : returns " - << name() << "\n"; - } - - proxyFile.oss << "%\n"; -} - -/* ************************************************************************* */ -void Class::serialization_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - -//void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) -//{ -// typedef boost::shared_ptr Shared; -// checkArguments("string_serialize",nargout,nargin-1,0); -// Shared obj = unwrap_shared_ptr(in[0], "ptr_Point3"); -// ostringstream out_archive_stream; -// boost::archive::text_oarchive out_archive(out_archive_stream); -// out_archive << *obj; -// out[0] = wrap< string >(out_archive_stream.str()); -//} - - int serialize_id = functionNames.size(); - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppClassName = qualifiedName("::"); - const string wrapFunctionNameSerialize = matlabUniqueName - + "_string_serialize_" + boost::lexical_cast(serialize_id); - functionNames.push_back(wrapFunctionNameSerialize); - - // call - //void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) - wrapperFile.oss << "void " << wrapFunctionNameSerialize - << "(int nargout, 
mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName - << "> Shared;" << endl; - - // check arguments - for serialize, no arguments - // example: checkArguments("string_serialize",nargout,nargin-1,0); - wrapperFile.oss - << " checkArguments(\"string_serialize\",nargout,nargin-1,0);\n"; - - // get class pointer - // example: Shared obj = unwrap_shared_ptr(in[0], "ptr_Point3"); - wrapperFile.oss << " Shared obj = unwrap_shared_ptr<" << cppClassName - << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl; - - // Serialization boilerplate - wrapperFile.oss << " ostringstream out_archive_stream;\n"; - wrapperFile.oss - << " boost::archive::text_oarchive out_archive(out_archive_stream);\n"; - wrapperFile.oss << " out_archive << *obj;\n"; - wrapperFile.oss << " out[0] = wrap< string >(out_archive_stream.str());\n"; - - // finish - wrapperFile.oss << "}\n"; - - // Generate code for matlab function -// function varargout string_serialize(this, varargin) -// % STRING_SERIALIZE usage: string_serialize() : returns string -// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html -// if length(varargin) == 0 -// varargout{1} = geometry_wrapper(15, this, varargin{:}); -// else -// error('Arguments do not match any overload of function Point3.string_serialize'); -// end -// end - - proxyFile.oss - << " function varargout = string_serialize(this, varargin)\n"; - proxyFile.oss - << " % STRING_SERIALIZE usage: string_serialize() : returns string\n"; - proxyFile.oss - << " % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - proxyFile.oss << " if length(varargin) == 0\n"; - proxyFile.oss << " varargout{1} = " << wrapperName << "(" - << boost::lexical_cast(serialize_id) << ", this, varargin{:});\n"; - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function 
" - << matlabQualName << ".string_serialize');\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " end\n\n"; - - // Generate code for matlab save function -// function sobj = saveobj(obj) -// % SAVEOBJ Saves the object to a matlab-readable format -// sobj = obj.string_serialize(); -// end - - proxyFile.oss << " function sobj = saveobj(obj)\n"; - proxyFile.oss - << " % SAVEOBJ Saves the object to a matlab-readable format\n"; - proxyFile.oss << " sobj = obj.string_serialize();\n"; - proxyFile.oss << " end\n"; -} - -/* ************************************************************************* */ -void Class::deserialization_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - //void Point3_string_deserialize_18(int nargout, mxArray *out[], int nargin, const mxArray *in[]) - //{ - // typedef boost::shared_ptr Shared; - // checkArguments("Point3.string_deserialize",nargout,nargin,1); - // string serialized = unwrap< string >(in[0]); - // istringstream in_archive_stream(serialized); - // boost::archive::text_iarchive in_archive(in_archive_stream); - // Shared output(new Point3(0,0,0)); - // in_archive >> *output; - // out[0] = wrap_shared_ptr(output,"Point3", false); - //} - int deserialize_id = functionNames.size(); - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppClassName = qualifiedName("::"); - const string wrapFunctionNameDeserialize = matlabUniqueName - + "_string_deserialize_" + boost::lexical_cast(deserialize_id); - functionNames.push_back(wrapFunctionNameDeserialize); - - // call - wrapperFile.oss << "void " << wrapFunctionNameDeserialize - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName - << "> Shared;" << endl; - - // check arguments - for deserialize, 1 string argument - wrapperFile.oss << " checkArguments(\"" << 
matlabUniqueName - << ".string_deserialize\",nargout,nargin,1);\n"; - - // string argument with deserialization boilerplate - wrapperFile.oss << " string serialized = unwrap< string >(in[0]);\n"; - wrapperFile.oss << " istringstream in_archive_stream(serialized);\n"; - wrapperFile.oss - << " boost::archive::text_iarchive in_archive(in_archive_stream);\n"; - wrapperFile.oss << " Shared output(new " << cppClassName << "());\n"; - wrapperFile.oss << " in_archive >> *output;\n"; - wrapperFile.oss << " out[0] = wrap_shared_ptr(output,\"" << matlabQualName - << "\", false);\n"; - wrapperFile.oss << "}\n"; - - // Generate matlab function -// function varargout = string_deserialize(varargin) -// % STRING_DESERIALIZE usage: string_deserialize() : returns Point3 -// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html -// if length(varargin) == 1 -// varargout{1} = geometry_wrapper(18, varargin{:}); -// else -// error('Arguments do not match any overload of function Point3.string_deserialize'); -// end -// end - - proxyFile.oss << " function varargout = string_deserialize(varargin)\n"; - proxyFile.oss - << " % STRING_DESERIALIZE usage: string_deserialize() : returns " - << matlabQualName << "\n"; - proxyFile.oss - << " % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - proxyFile.oss << " if length(varargin) == 1\n"; - proxyFile.oss << " varargout{1} = " << wrapperName << "(" - << boost::lexical_cast(deserialize_id) << ", varargin{:});\n"; - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << ".string_deserialize');\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " end\n\n"; - - // Generate matlab load function -// function obj = loadobj(sobj) -// % LOADOBJ Saves the object to a matlab-readable format -// obj = Point3.string_deserialize(sobj); -// end - - proxyFile.oss << " function obj = loadobj(sobj)\n"; - 
proxyFile.oss - << " % LOADOBJ Saves the object to a matlab-readable format\n"; - proxyFile.oss << " obj = " << matlabQualName - << ".string_deserialize(sobj);\n"; - proxyFile.oss << " end" << endl; -} - -/* ************************************************************************* */ -string Class::getSerializationExport() const { - //BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsamSharedDiagonal"); - return "BOOST_CLASS_EXPORT_GUID(" + qualifiedName("::") + ", \"" - + qualifiedName() + "\");"; -} - -/* ************************************************************************* */ -void Class::python_wrapper(FileWriter& wrapperFile) const { - wrapperFile.oss << "class_<" << name() << ">(\"" << name() << "\")\n"; - constructor.python_wrapper(wrapperFile, name()); - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.python_wrapper(wrapperFile, name()); - for(const Method& m: methods_ | boost::adaptors::map_values) - m.python_wrapper(wrapperFile, name()); - wrapperFile.oss << ";\n\n"; -} - -/* ************************************************************************* */ -void Class::emit_cython_pxd(FileWriter& pxdFile) const { - pxdFile.oss << "cdef extern from \"" << includeFile << "\""; - string ns = qualifiedNamespaces("::"); - if (!ns.empty()) - pxdFile.oss << " namespace \"" << ns << "\""; - pxdFile.oss << ":" << endl; - pxdFile.oss << " cdef cppclass " << pxdClassName() << " \"" << qualifiedName("::") << "\""; - if (templateArgs.size()>0) { - pxdFile.oss << "["; - for(size_t i = 0; ipxdClassName() << ")"; - pxdFile.oss << ":\n"; - - constructor.emit_cython_pxd(pxdFile, *this); - if (constructor.nrOverloads()>0) pxdFile.oss << "\n"; - - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - if (static_methods.size()>0) pxdFile.oss << "\n"; - - for(const Method& m: nontemplateMethods_ | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - - for(const TemplateMethod& 
m: templateMethods_ | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - size_t numMethods = constructor.nrOverloads() + static_methods.size() + - methods_.size() + templateMethods_.size(); - if (numMethods == 0) - pxdFile.oss << " pass\n"; -} -/* ************************************************************************* */ -void Class::emit_cython_wrapper_pxd(FileWriter& pxdFile) const { - pxdFile.oss << "\ncdef class " << pyxClassName(); - if (getParent()) - pxdFile.oss << "(" << getParent()->pyxClassName() << ")"; - pxdFile.oss << ":\n"; - pxdFile.oss << " cdef " << shared_pxd_class_in_pyx() << " " - << shared_pxd_obj_in_pyx() << "\n"; - // cyCreateFromShared - pxdFile.oss << " @staticmethod\n"; - pxdFile.oss << " cdef " << pyxClassName() << " cyCreateFromShared(const " - << shared_pxd_class_in_pyx() << "& other)\n"; - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_wrapper_pxd(pxdFile, *this); - if (static_methods.size()>0) pxdFile.oss << "\n"; -} - -/* ************************************************************************* */ -void Class::pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj, - const std::string& cySharedObj, - const std::vector& allClasses) const { - if (parentClass) { - pyxFile.oss << pyObj << "." 
<< parentClass->shared_pxd_obj_in_pyx() << " = " - << "<" << parentClass->shared_pxd_class_in_pyx() << ">(" - << cySharedObj << ")\n"; - // Find the parent class with name "parentClass" and point its cython obj - // to the same pointer - auto parent_it = find_if(allClasses.begin(), allClasses.end(), - [this](const Class& cls) { - return cls.pxdClassName() == - this->parentClass->pxdClassName(); - }); - if (parent_it == allClasses.end()) { - cerr << "Can't find parent class: " << parentClass->pxdClassName(); - throw std::runtime_error("Parent class not found!"); - } - parent_it->pyxInitParentObj(pyxFile, pyObj, cySharedObj, allClasses); - } -} - -/* ************************************************************************* */ -void Class::pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel, - const std::vector& allClasses) const { - std::string me = this->pyxClassName(), sharedMe = this->shared_pxd_class_in_pyx(); - if (curLevel.parentClass) { - std::string parent = curLevel.parentClass->pyxClassName(), - parentObj = curLevel.parentClass->shared_pxd_obj_in_pyx(), - parentCythonClass = curLevel.parentClass->pxd_class_in_pyx(); - pyxFile.oss << "def dynamic_cast_" << me << "_" << parent << "(" << parent - << " parent):\n"; - pyxFile.oss << " try:\n"; - pyxFile.oss << " return " << me << ".cyCreateFromShared(<" << sharedMe - << ">dynamic_pointer_cast[" << pxd_class_in_pyx() << "," - << parentCythonClass << "](parent." 
<< parentObj - << "))\n"; - pyxFile.oss << " except:\n"; - pyxFile.oss << " raise TypeError('dynamic cast failed!')\n"; - // Move up higher to one level: Find the parent class with name "parentClass" - auto parent_it = find_if(allClasses.begin(), allClasses.end(), - [&curLevel](const Class& cls) { - return cls.pxdClassName() == - curLevel.parentClass->pxdClassName(); - }); - if (parent_it == allClasses.end()) { - cerr << "Can't find parent class: " << parentClass->pxdClassName(); - throw std::runtime_error("Parent class not found!"); - } - pyxDynamicCast(pyxFile, *parent_it, allClasses); - } -} - -/* ************************************************************************* */ -void Class::emit_cython_pyx(FileWriter& pyxFile, const std::vector& allClasses) const { - pyxFile.oss << "cdef class " << pyxClassName(); - if (parentClass) pyxFile.oss << "(" << parentClass->pyxClassName() << ")"; - pyxFile.oss << ":\n"; - - // __init___ - pyxFile.oss << " def __init__(self, *args, **kwargs):\n"; - pyxFile.oss << " cdef list __params\n"; - pyxFile.oss << " self." << shared_pxd_obj_in_pyx() << " = " << shared_pxd_class_in_pyx() << "()\n"; - pyxFile.oss << " if len(args)==0 and len(kwargs)==1 and kwargs.has_key('cyCreateFromShared'):\n return\n"; - - // Constructors - constructor.emit_cython_pyx(pyxFile, *this); - pyxFile.oss << " if (self." << shared_pxd_obj_in_pyx() << ".use_count()==0):\n"; - pyxFile.oss << " raise TypeError('" << pyxClassName() - << " construction failed!')\n"; - pyxInitParentObj(pyxFile, " self", "self." 
+ shared_pxd_obj_in_pyx(), allClasses); - pyxFile.oss << "\n"; - - // cyCreateFromShared - pyxFile.oss << " @staticmethod\n"; - pyxFile.oss << " cdef " << pyxClassName() << " cyCreateFromShared(const " - << shared_pxd_class_in_pyx() << "& other):\n" - << " if other.get() == NULL:\n" - << " raise RuntimeError('Cannot create object from a nullptr!')\n" - << " cdef " << pyxClassName() << " return_value = " << pyxClassName() << "(cyCreateFromShared=True)\n" - << " return_value." << shared_pxd_obj_in_pyx() << " = other\n"; - pyxInitParentObj(pyxFile, " return_value", "other", allClasses); - pyxFile.oss << " return return_value" << "\n\n"; - - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_pyx(pyxFile, *this); - if (static_methods.size()>0) pyxFile.oss << "\n"; - - for(const Method& m: methods_ | boost::adaptors::map_values) - m.emit_cython_pyx(pyxFile, *this); - - pyxDynamicCast(pyxFile, *this, allClasses); - - pyxFile.oss << "\n\n"; -} - -/* ************************************************************************* */ diff --git a/wrap/Class.h b/wrap/Class.h deleted file mode 100644 index 3df37fe67..000000000 --- a/wrap/Class.h +++ /dev/null @@ -1,315 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.h - * @brief describe the C++ class that is being wrapped - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include "spirit.h" -#include "Template.h" -#include "Constructor.h" -#include "Deconstructor.h" -#include "Method.h" -#include "StaticMethod.h" -#include "TemplateMethod.h" -#include "TypeAttributesTable.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#endif -#include -#include -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -namespace bl = boost::lambda; - -#include -#include - -#include -#include - -namespace wrap { - -/// Class has name, constructors, methods -class Class: public Qualified { - -public: - typedef const std::string& Str; - typedef std::map Methods; - typedef std::map StaticMethods; - typedef std::map TemplateMethods; - -private: - - boost::optional parentClass; ///< The *single* parent - Methods methods_; ///< Class methods, including all expanded/instantiated template methods -- to be serialized to matlab and Python classes in Cython pyx - Methods nontemplateMethods_; ///< only nontemplate methods -- to be serialized into Cython pxd - TemplateMethods templateMethods_; ///< only template methods -- to be serialized into Cython pxd - // Method& mutableMethod(Str key); - -public: - - StaticMethods static_methods; ///< Static methods - - // Then the instance variables are set directly by the Module constructor - std::vector templateArgs; ///< Template arguments - std::string typedefName; ///< The name to typedef *from*, if this class is actually a typedef, i.e. typedef [typedefName] [name] - std::vector templateInstTypeList; ///< the original typelist used to instantiate this class from a template. - ///< Empty if it's not an instantiation. 
Needed for template classes in Cython pxd. - boost::optional templateClass = boost::none; ///< qualified name of the original template class from which this class was instantiated. - ///< boost::none if not an instantiation. Needed for template classes in Cython pxd. - bool isVirtual; ///< Whether the class is part of a virtual inheritance chain - bool isSerializable; ///< Whether we can use boost.serialization to serialize the class - creates exports - bool hasSerialization; ///< Whether we should create the serialization functions - Constructor constructor; ///< Class constructors - Deconstructor deconstructor; ///< Deconstructor to deallocate C++ object - bool verbose_; ///< verbose flag - std::string includeFile; - - /// Constructor creates an empty class - Class(bool verbose = true) : - parentClass(boost::none), isVirtual(false), isSerializable(false), hasSerialization( - false), deconstructor(verbose), verbose_(verbose) { - } - - Class(const std::string& name, bool verbose = true) - : Qualified(name, Qualified::Category::CLASS), - parentClass(boost::none), - isVirtual(false), - isSerializable(false), - hasSerialization(false), - deconstructor(verbose), - verbose_(verbose) {} - - void assignParent(const Qualified& parent); - - boost::optional qualifiedParent() const; - boost::optional getParent() const { return parentClass; } - - size_t nrMethods() const { - return methods_.size(); - } - - const Method& method(Str key) const; - - bool exists(Str name) const { - return methods_.find(name) != methods_.end(); - } - - // And finally MATLAB code is emitted, methods below called by Module::matlab_code - void matlab_proxy(Str toolboxPath, Str wrapperName, - const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile, - std::vector& functionNames) const; ///< emit proxy class - - Class expandTemplate(const TemplateSubstitution& ts) const; - - std::vector expandTemplate(Str templateArg, - const std::vector& instantiations) const; - - // Create new classes with 
integer template arguments - std::vector expandTemplate(Str templateArg, - const std::vector& integers) const; - - /// Add potentially overloaded, potentially templated method - void addMethod(bool verbose, bool is_const, Str methodName, - const ArgumentList& argumentList, const ReturnValue& returnValue, - const Template& tmplate); - - /// Post-process classes for serialization markers - void erase_serialization(); // non-const ! - void erase_serialization(Methods& methods); // non-const ! - - /// verify all of the function arguments - void verifyAll(std::vector& functionNames, - bool& hasSerialiable) const; - - void appendInheritedMethods(const Class& cls, - const std::vector& classes); - - void removeInheritedNontemplateMethods(std::vector& classes); - - /// The typedef line for this class, if this class is a typedef, otherwise returns an empty string. - std::string getTypedef() const; - - /// Returns the string for an export flag - std::string getSerializationExport() const; - - /// Creates a member function that performs serialization - void serialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str wrapperName, std::vector& functionNames) const; - - /// Creates a static member function that performs deserialization - void deserialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str wrapperName, std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile) const; - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile) const; - void emit_cython_wrapper_pxd(FileWriter& pxdFile) const; - void emit_cython_pyx(FileWriter& pyxFile, - const std::vector& allClasses) const; - void pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj, - const std::string& cySharedObj, - const std::vector& allClasses) const; - void pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel, - const std::vector& allClasses) const; - - friend std::ostream& operator<<(std::ostream& 
os, const Class& cls) { - os << "class " << cls.name() << "{\n"; - os << cls.constructor << ";\n"; - for(const StaticMethod& m: cls.static_methods | boost::adaptors::map_values) - os << m << ";\n"; - for(const Method& m: cls.methods_ | boost::adaptors::map_values) - os << m << ";\n"; - os << "};" << std::endl; - return os; - } - -private: - - void pointer_constructor_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - std::vector& functionNames) const; - - void comment_fragment(FileWriter& proxyFile) const; -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ClassGrammar: public classic::grammar { - - Class& cls_; ///< successful parse will be placed in here - Template& template_; ///< result needs to be visible outside - - /// Construct type grammar and specify where result is placed - ClassGrammar(Class& cls, Template& t) : - cls_(cls), template_(t) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - using BasicRules::name_p; - using BasicRules::className_p; - using BasicRules::comments_p; - - // NOTE: allows for pointers to all types - ArgumentList args; - ArgumentListGrammar argumentList_g; - - Constructor constructor0, constructor; - - ReturnValue retVal0, retVal; - ReturnValueGrammar returnValue_g; - - Template methodTemplate; - TemplateGrammar methodTemplate_g, classTemplate_g; - - std::string methodName; - bool isConst, T, F; - - // Parent class - Qualified possibleParent; - TypeGrammar classParent_g; - - classic::rule constructor_p, methodName_p, method_p, - staticMethodName_p, static_method_p, templateList_p, classParent_p, - functions_p, class_p; - - definition(ClassGrammar const& self) : - argumentList_g(args), returnValue_g(retVal), // - methodTemplate_g(methodTemplate), classTemplate_g(self.template_), // - T(true), F(false), classParent_g(possibleParent) { - - using 
namespace classic; - bool verbose = false; // TODO - - // ConstructorGrammar - constructor_p = (className_p >> argumentList_g >> ';' >> !comments_p) // - [bl::bind(&Constructor::push_back, bl::var(constructor), - bl::var(args))] // - [clear_a(args)]; - - // MethodGrammar - methodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - // gtsam::Values retract(const gtsam::VectorValues& delta) const; - method_p = !methodTemplate_g - >> (returnValue_g >> methodName_p[assign_a(methodName)] - >> argumentList_g >> !str_p("const")[assign_a(isConst, T)] >> ';' - >> *comments_p) // - [bl::bind(&Class::addMethod, bl::var(self.cls_), verbose, - bl::var(isConst), bl::var(methodName), bl::var(args), - bl::var(retVal), bl::var(methodTemplate))] // - [assign_a(retVal, retVal0)][clear_a(args)] // - [clear_a(methodTemplate)][assign_a(isConst, F)]; - - // StaticMethodGrammar - staticMethodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - static_method_p = (str_p("static") >> returnValue_g - >> staticMethodName_p[assign_a(methodName)] >> argumentList_g >> ';' - >> *comments_p) // - [bl::bind(&StaticMethod::addOverload, - bl::var(self.cls_.static_methods)[bl::var(methodName)], - bl::var(methodName), bl::var(args), bl::var(retVal), boost::none, - verbose)] // - [assign_a(retVal, retVal0)][clear_a(args)]; - - // template - templateList_p = (str_p("template") >> '<' - >> name_p[push_back_a(self.cls_.templateArgs)] - >> *(',' >> name_p[push_back_a(self.cls_.templateArgs)]) >> '>'); - - // parse a full class - classParent_p = (':' >> classParent_g >> '{') // - [bl::bind(&Class::assignParent, bl::var(self.cls_), - bl::var(possibleParent))][clear_a(possibleParent)]; - - functions_p = constructor_p | method_p | static_method_p; - - // parse a full class - class_p = (!(classTemplate_g[push_back_a(self.cls_.templateArgs, - self.template_.argName())] | templateList_p) - >> !(str_p("virtual")[assign_a(self.cls_.isVirtual, T)]) - >> str_p("class") >> 
className_p[assign_a(self.cls_.name_)] - >> (classParent_p | '{') >> // - *(functions_p | comments_p) >> str_p("};")) // - [bl::bind(&Constructor::initializeOrCheck, bl::var(constructor), - bl::var(self.cls_.name_), boost::none, verbose)][assign_a( - self.cls_.constructor, constructor)] // - [assign_a(self.cls_.deconstructor.name, self.cls_.name_)] // - [assign_a(constructor, constructor0)]; - } - - classic::rule const& start() const { - return class_p; - } - - }; -}; -// ClassGrammar - -}// \namespace wrap - diff --git a/wrap/Constructor.cpp b/wrap/Constructor.cpp deleted file mode 100644 index 74719b289..000000000 --- a/wrap/Constructor.cpp +++ /dev/null @@ -1,160 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include -#include - -#include - -#include "utilities.h" -#include "Constructor.h" -#include "Class.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Constructor::matlab_wrapper_name(Str className) const { - string str = "new_" + className; - return str; -} - -/* ************************************************************************* */ -void Constructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, bool hasParent, - const int id, const ArgumentList args) const { - size_t nrArgs = args.size(); - // check for number of arguments... 
- file.oss << " elseif nargin == " << nrArgs; - if (nrArgs > 0) file.oss << " && "; - // ...and their types - bool first = true; - for (size_t i = 0; i < nrArgs; i++) { - if (!first) file.oss << " && "; - file.oss << "isa(varargin{" << i + 1 << "},'" << args[i].matlabClass(".") - << "')"; - first = false; - } - // emit code for calling constructor - if (hasParent) - file.oss << "\n [ my_ptr, base_ptr ] = "; - else - file.oss << "\n my_ptr = "; - file.oss << wrapperName << "(" << id; - // emit constructor arguments - for (size_t i = 0; i < nrArgs; i++) { - file.oss << ", "; - file.oss << "varargin{" << i + 1 << "}"; - } - file.oss << ");\n"; -} - -/* ************************************************************************* */ -string Constructor::wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, - boost::optional cppBaseClassName, - int id, const ArgumentList& al) const { - const string wrapFunctionName = - matlabUniqueName + "_constructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" - << endl; - file.oss << "{\n"; - file.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;\n"; - file.oss << "\n"; - - // Check to see if there will be any arguments and remove {} for consiseness - if (al.size() > 0) al.matlab_unwrap(file); // unwrap arguments - file.oss << " Shared *self = new Shared(new " << cppClassName << "(" - << al.names() << "));" << endl; - file.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - - if (verbose_) - file.oss << " std::cout << \"constructed \" << self << std::endl;" << endl; - file.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);" - << endl; - file.oss << " *reinterpret_cast (mxGetData(out[0])) = self;" - << endl; - - // If we have a base class, return the base class pointer (MATLAB will 
call - // the base class collectorInsertAndMakeBase to add this to the collector and - // recurse the heirarchy) - if (cppBaseClassName) { - file.oss << "\n"; - file.oss << " typedef boost::shared_ptr<" << *cppBaseClassName - << "> SharedBase;\n"; - file.oss << " out[1] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, " - "mxREAL);\n"; - file.oss << " *reinterpret_cast(mxGetData(out[1])) = new " - "SharedBase(*self);\n"; - } - - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void Constructor::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << " .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ -bool Constructor::hasDefaultConstructor() const { - for (size_t i = 0; i < nrOverloads(); i++) { - if (argumentList(i).size() == 0) return true; - } - return false; -} - -/* ************************************************************************* */ -void Constructor::emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - - // generate the constructor - pxdFile.oss << " " << cls.pxdClassName() << "("; - args.emit_cython_pxd(pxdFile, cls.pxdClassName(), cls.templateArgs); - pxdFile.oss << ") " << "except +\n"; - } -} - -/* ************************************************************************* */ -void Constructor::emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - pyxFile.oss << " try:\n"; - pyxFile.oss << pyx_resolveOverloadParams(args, true, 3); - pyxFile.oss - << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - - pyxFile.oss << " self." 
<< cls.shared_pxd_obj_in_pyx() << " = " - << cls.shared_pxd_class_in_pyx() << "(new " << cls.pxd_class_in_pyx() - << "(" << args.pyx_asParams() << "))\n"; - pyxFile.oss << " except (AssertionError, ValueError):\n"; - pyxFile.oss << " pass\n"; - } -} - -/* ************************************************************************* */ diff --git a/wrap/Constructor.h b/wrap/Constructor.h deleted file mode 100644 index 172cd24a4..000000000 --- a/wrap/Constructor.h +++ /dev/null @@ -1,99 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include -#include -#include - -namespace wrap { - -// Forward declaration -class Class; - -// Constructor class -struct Constructor: public OverloadedFunction { - - typedef const std::string& Str; - - /// Constructor creates an empty class - Constructor(bool verbose = false) { - verbose_ = verbose; - } - - Constructor expandTemplate(const TemplateSubstitution& ts) const { - Constructor inst = *this; - inst.argLists_ = expandArgumentListsTemplate(ts); - inst.name_ = ts.expandedClassName(); - return inst; - } - - /// return true if the default constructor exists - bool hasDefaultConstructor() const; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(Str className) const; - - void comment_fragment(FileWriter& proxyFile) const 
{ - if (nrOverloads() > 0) - proxyFile.oss << "%\n%-------Constructors-------\n"; - for (size_t i = 0; i < nrOverloads(); i++) { - proxyFile.oss << "%"; - argumentList(i).emit_prototype(proxyFile, name_); - proxyFile.oss << "\n"; - } - } - - /** - * Create fragment to select constructor in proxy class, e.g., - * if nargin == 2, obj.self = new_Pose3_RP(varargin{1},varargin{2}); end - */ - void proxy_fragment(FileWriter& file, Str wrapperName, bool hasParent, - const int id, const ArgumentList args) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, boost::optional cppBaseClassName, int id, - const ArgumentList& al) const; - - /// constructor function - void generate_construct(FileWriter& file, Str cppClassName, - std::vector& args_list) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const; - void emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const; - - friend std::ostream& operator<<(std::ostream& os, const Constructor& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << m.name_ << m.argLists_[i]; - return os; - } - -}; - -} // \namespace wrap diff --git a/wrap/Deconstructor.cpp b/wrap/Deconstructor.cpp deleted file mode 100644 index 7bb366e3f..000000000 --- a/wrap/Deconstructor.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include - -#include - -#include "utilities.h" -#include "Deconstructor.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Deconstructor::matlab_wrapper_name(const string& className) const { - string str = "delete_" + className; - return str; -} - -/* ************************************************************************* */ -void Deconstructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const { - - file.oss << " function delete(obj)\n"; - file.oss << " " << wrapperName << "(" << id << ", obj.ptr_" << matlabUniqueName << ");\n"; - file.oss << " end\n"; -} - -/* ************************************************************************* */ -string Deconstructor::wrapper_fragment(FileWriter& file, - const string& cppClassName, - const string& matlabUniqueName, - int id) const { - - const string matlabName = matlab_wrapper_name(matlabUniqueName); - - const string wrapFunctionName = matlabUniqueName + "_deconstructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" << endl; - file.oss << "{" << endl; - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;" << endl; - //Deconstructor takes 1 arg, the mxArray obj - file.oss << " checkArguments(\"" << matlabName << "\",nargout,nargin," << "1" << ");" << endl; - file.oss << " Shared *self = *reinterpret_cast(mxGetData(in[0]));\n"; - file.oss << " Collector_" << matlabUniqueName << "::iterator item;\n"; - file.oss << " item = collector_" << matlabUniqueName << 
".find(self);\n"; - file.oss << " if(item != collector_" << matlabUniqueName << ".end()) {\n"; - file.oss << " delete self;\n"; - file.oss << " collector_" << matlabUniqueName << ".erase(item);\n"; - file.oss << " }\n"; - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ diff --git a/wrap/Deconstructor.h b/wrap/Deconstructor.h deleted file mode 100644 index ee2f4ea19..000000000 --- a/wrap/Deconstructor.h +++ /dev/null @@ -1,61 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include -#include - -#include "Argument.h" - -namespace wrap { - -// Deconstructor class -struct Deconstructor { - - /// Deconstructor creates an empty class - Deconstructor(bool verbose = true) : - verbose_(verbose) { - } - - // Then the instance variables are set directly by the Module deconstructor - std::string name; - bool verbose_; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(const std::string& className) const; - - /// m-file - void proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, - const std::string& cppClassName, - const std::string& 
matlabUniqueName, - int id) const; -}; - -} // \namespace wrap - diff --git a/wrap/FileWriter.cpp b/wrap/FileWriter.cpp deleted file mode 100644 index c07de0eb0..000000000 --- a/wrap/FileWriter.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/** - * @file FileWriter.cpp - * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#include "FileWriter.h" -#include "utilities.h" - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -FileWriter::FileWriter(const string& filename, bool verbose, - const string& comment_str) : - verbose_(verbose), filename_(filename), comment_str_(comment_str) { -} - -/* ************************************************************************* */ -void FileWriter::emit(bool add_header, bool force_overwrite) const { - if (verbose_) - cerr << "generating " << filename_ << " "; - // read in file if it exists - string existing_contents; - bool file_exists = true; - try { - existing_contents = file_contents(filename_.c_str(), add_header); - } catch (const CantOpenFile& ) { - file_exists = false; - } - - // Only write a file if it is new, an update, or overwrite is forced - string new_contents = oss.str(); - if (force_overwrite || !file_exists || existing_contents != new_contents) { - // Binary to use LF line endings instead of CRLF - ofstream ofs(filename_.c_str(), ios::binary); - if (!ofs) - throw CantOpenFile(filename_); - - // dump in stringstream - ofs << new_contents; - ofs.close(); - } - if (verbose_) - cerr << " ...no update" << endl; -} -/* ************************************************************************* */ - diff --git a/wrap/FileWriter.h b/wrap/FileWriter.h deleted file mode 100644 index 12e033fdf..000000000 --- a/wrap/FileWriter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * @file FileWriter.h - * - * @brief Wrapper for writing files and avoiding overwriting existing files - * This class wraps a stream object and will check that the file is 
- * actually different to write the new generated file. - * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include - -namespace wrap { - -class FileWriter { -protected: - bool verbose_; - std::string filename_; - std::string comment_str_; - -public: - std::ostringstream oss; ///< Primary stream for operating on the file - - /** Create a writer with a filename and delimiter for the header comment */ - FileWriter(const std::string& filename, bool verbose, const std::string& comment_str); - - /** Writes the contents of the stringstream to the file, checking if actually new */ - void emit(bool add_header, bool force=false) const; - -}; - -} // \namespace wrap diff --git a/wrap/ForwardDeclaration.h b/wrap/ForwardDeclaration.h deleted file mode 100644 index 190387ecc..000000000 --- a/wrap/ForwardDeclaration.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.h - * @brief describe the C++ class that is being wrapped - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include - -namespace wrap { - - class Class; - - struct ForwardDeclaration { - Class cls; - bool isVirtual; - ForwardDeclaration() : isVirtual(false) {} - explicit ForwardDeclaration(const std::string& s) : cls(s), isVirtual(false) {} - std::string name() const { return cls.qualifiedName("::"); } - }; - -} diff --git a/wrap/FullyOverloadedFunction.cpp b/wrap/FullyOverloadedFunction.cpp deleted file mode 100644 index 4db4c8713..000000000 --- a/wrap/FullyOverloadedFunction.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include "FullyOverloadedFunction.h" - -using namespace std; - -namespace wrap { -const std::array FullyOverloadedFunction::pythonKeywords{ - {"print", "lambda"}}; - -/* ************************************************************************* */ -std::string FullyOverloadedFunction::pyx_functionCall( - const std::string& caller, - const std::string& funcName, size_t iOverload) const { - - string ret; - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) { - ret = returnVals_[iOverload].type1.make_shared_pxd_class_in_pyx() + "("; - } - - // actual function call ... - if (!caller.empty()) ret += caller + "."; - ret += funcName; - if (templateArgValue_) ret += "[" + templateArgValue_->pxd_class_in_pyx() + "]"; - //... 
with argument list - ret += "(" + argumentList(iOverload).pyx_asParams() + ")"; - - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) - ret += ")"; - - return ret; -} - -} diff --git a/wrap/FullyOverloadedFunction.h b/wrap/FullyOverloadedFunction.h deleted file mode 100644 index 6b40f6a70..000000000 --- a/wrap/FullyOverloadedFunction.h +++ /dev/null @@ -1,147 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file FullyOverloadedFunction.h - * @brief Function that can be fully overloaded: arguments and return values - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include - -namespace wrap { - -/** - * Signature Overload (including return value) - */ -class SignatureOverloads: public ArgumentOverloads { - -public: - - std::vector returnVals_; - -public: - - const ReturnValue& returnValue(size_t i) const { - return returnVals_.at(i); - } - - void push_back(const ArgumentList& args, const ReturnValue& retVal) { - argLists_.push_back(args); - returnVals_.push_back(retVal); - } - - void verifyReturnTypes(const std::vector& validtypes, - const std::string& s) const { - for(const ReturnValue& retval: returnVals_) { - retval.type1.verify(validtypes, s); - if (retval.isPair) - retval.type2.verify(validtypes, s); - } - } - - // TODO use transform ? 
- std::vector expandReturnValuesTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for(const ReturnValue& retVal: returnVals_) { - ReturnValue instRetVal = retVal.expandTemplate(ts); - result.push_back(instRetVal); - } - return result; - } - - /// Expand templates, imperative ! - void expandTemplate(const TemplateSubstitution& ts) { - // substitute template in arguments - argLists_ = expandArgumentListsTemplate(ts); - // do the same for return types - returnVals_ = expandReturnValuesTemplate(ts); - } - - // emit a list of comments, one for each overload - void usage_fragment(FileWriter& proxyFile, const std::string& name) const { - unsigned int argLCount = 0; - for(ArgumentList argList: argLists_) { - argList.emit_prototype(proxyFile, name); - if (argLCount != nrOverloads() - 1) - proxyFile.oss << ", "; - else - proxyFile.oss << " : returns " << returnValue(0).returnType() - << std::endl; - argLCount++; - } - } - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile, const std::string& name) const { - size_t i = 0; - for(ArgumentList argList: argLists_) { - proxyFile.oss << "%"; - argList.emit_prototype(proxyFile, name); - proxyFile.oss << " : returns " << returnVals_[i++].returnType() - << std::endl; - } - } - - friend std::ostream& operator<<(std::ostream& os, - const SignatureOverloads& overloads) { - for (size_t i = 0; i < overloads.nrOverloads(); i++) - os << overloads.returnVals_[i] << overloads.argLists_[i] << std::endl; - return os; - } - -}; - -class FullyOverloadedFunction: public Function, public SignatureOverloads { - -public: - - bool addOverload(const std::string& name, const ArgumentList& args, - const ReturnValue& retVal, boost::optional instName = - boost::none, bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - SignatureOverloads::push_back(args, retVal); - return first; - } - - // emit cython pyx function call - std::string 
pyx_functionCall(const std::string& caller, const std::string& funcName, - size_t iOverload) const; - - /// Cython: Rename functions which names are python keywords - static const std::array pythonKeywords; - static std::string pyRename(const std::string& name) { - if (std::find(pythonKeywords.begin(), pythonKeywords.end(), name) == - pythonKeywords.end()) - return name; - else - return name + "_"; - } -}; - -// Templated checking functions -// TODO: do this via polymorphism, use transform ? - -template -inline void verifyReturnTypes(const std::vector& validTypes, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for(const NamedMethod& namedMethod: vt) - namedMethod.second.verifyReturnTypes(validTypes); -} - -} // \namespace wrap - diff --git a/wrap/Function.cpp b/wrap/Function.cpp deleted file mode 100644 index 80b0adbbe..000000000 --- a/wrap/Function.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.ccp - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#include "Function.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Function::initializeOrCheck(const string& name, - boost::optional instName, bool verbose) { - - if (name.empty()) - throw runtime_error("Function::initializeOrCheck called with empty name"); - - // Check if this overload is give to the correct method - if (name_.empty()) { - name_ = name; - templateArgValue_ = instName; - verbose_ = verbose; - return true; - } else { - if (name_ != name || verbose_ != verbose - || ((bool) templateArgValue_ != (bool) instName) - || ((bool) templateArgValue_ && (bool) instName - && !(*templateArgValue_ == *instName))) - throw runtime_error( - "Function::initializeOrCheck called with different arguments"); - - return false; - } -} - -/* ************************************************************************* */ -void Function::emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const string& wrapperName, int id) const { - returnVal.emit_matlab(proxyFile); - proxyFile.oss << wrapperName << "(" << id; - if (!isStatic()) - proxyFile.oss << ", this"; - proxyFile.oss << ", varargin{:});\n"; -} - -/* ************************************************************************* */ -void Function::emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const string& wrapperName, int id) const { - - // Check all arguments - args.proxy_check(proxyFile); - - // output call to C++ wrapper - proxyFile.oss << " "; - emit_call(proxyFile, returnVal, wrapperName, id); -} - -/* 
************************************************************************* */ diff --git a/wrap/Function.h b/wrap/Function.h deleted file mode 100644 index c39b3231c..000000000 --- a/wrap/Function.h +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.h - * @brief Base class for global functions and methods - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Argument.h" -#include - -namespace wrap { - -/// Function class -class Function { - -protected: - - std::string name_; ///< name of method - boost::optional templateArgValue_; ///< value of template argument if applicable - bool verbose_; - -public: - - /** - * @brief first time, fill in instance variables, otherwise check if same - * @return true if first time, false thereafter - */ - bool initializeOrCheck(const std::string& name, - boost::optional instName = boost::none, bool verbose = - false); - - std::string name() const { - return name_; - } - - /// Only Methods are non-static - virtual bool isStatic() const { - return true; - } - - std::string matlabName() const { - if (templateArgValue_) - return name_ + templateArgValue_->name(); - else - return name_; - } - - /// Emit function call to MATLAB (no argument checking) - void emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const std::string& wrapperName, int id) const; - - /// Emit checking arguments and function call to MATLAB - void emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const std::string& wrapperName, int id) const; - -}; - -} // 
\namespace wrap - diff --git a/wrap/GlobalFunction.cpp b/wrap/GlobalFunction.cpp deleted file mode 100644 index 02ab19657..000000000 --- a/wrap/GlobalFunction.cpp +++ /dev/null @@ -1,227 +0,0 @@ -/** - * @file GlobalFunction.cpp - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#include "GlobalFunction.h" -#include "Class.h" -#include "utilities.h" - -#include - -namespace wrap { - -using namespace std; - -/* ************************************************************************* */ -void GlobalFunction::addOverload(const Qualified& overload, - const ArgumentList& args, const ReturnValue& retVal, const std::string& _includeFile, - boost::optional instName, bool verbose) { - FullyOverloadedFunction::addOverload(overload.name(), args, retVal, instName, - verbose); - overloads.push_back(overload); - includeFile = _includeFile; -} - -/* ************************************************************************* */ -void GlobalFunction::matlab_proxy(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // cluster overloads with same namespace - // create new GlobalFunction structures around namespaces - same namespaces and names are overloads - // map of namespace to global function - typedef map GlobalFunctionMap; - GlobalFunctionMap grouped_functions; - for (size_t i = 0; i < overloads.size(); ++i) { - Qualified overload = overloads.at(i); - // use concatenated namespaces as key - string str_ns = qualifiedName("", overload.namespaces()); - const ReturnValue& ret = returnValue(i); - const ArgumentList& args = argumentList(i); - grouped_functions[str_ns].addOverload(overload, args, ret); - } - - size_t lastcheck = grouped_functions.size(); - for(const GlobalFunctionMap::value_type& p: grouped_functions) { - p.second.generateSingleFunction(toolboxPath, wrapperName, typeAttributes, - file, functionNames); - if (--lastcheck != 0) - file.oss << endl; - } -} - -/* 
************************************************************************* */ -void GlobalFunction::generateSingleFunction(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // create the folder for the namespace - const Qualified& overload1 = overloads.front(); - createNamespaceStructure(overload1.namespaces(), toolboxPath); - - // open destination mfunctionFileName - string mfunctionFileName = overload1.matlabName(toolboxPath); - FileWriter mfunctionFile(mfunctionFileName, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = overload1.qualifiedName("."); - const string matlabUniqueName = overload1.qualifiedName(""); - const string cppName = overload1.qualifiedName("::"); - - mfunctionFile.oss << "function varargout = " << name_ << "(varargin)\n"; - - for (size_t i = 0; i < nrOverloads(); ++i) { - const ArgumentList& args = argumentList(i); - const ReturnValue& returnVal = returnValue(i); - - const int id = functionNames.size(); - - // Output proxy matlab code - mfunctionFile.oss << " " << (i == 0 ? 
"" : "else"); - emit_conditional_call(mfunctionFile, returnVal, args, wrapperName, id); - - // Output C++ wrapper code - - const string wrapFunctionName = matlabUniqueName + "_" - + boost::lexical_cast(id); - - // call - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - file.oss << "{\n"; - - // check arguments - // NOTE: for static functions, there is no object passed - file.oss << " checkArguments(\"" << matlabUniqueName - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(file, 0); // We start at 0 because there is no self object - - // call method with default type and wrap result - if (returnVal.type1.name() != "void") - returnVal.wrap_result(cppName + "(" + args.names() + ")", file, - typeAttributes); - else - file.oss << cppName + "(" + args.names() + ");\n"; - - // finish - file.oss << "}\n"; - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - - mfunctionFile.oss << " else\n"; - mfunctionFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "');" << endl; - mfunctionFile.oss << " end" << endl; - - // Close file - mfunctionFile.emit(true); -} - -/* ************************************************************************* */ -void GlobalFunction::python_wrapper(FileWriter& wrapperFile) const { - wrapperFile.oss << "def(\"" << name_ << "\", " << name_ << ");\n"; -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pxd(FileWriter& file) const { - file.oss << "cdef extern from \"" << includeFile << "\" namespace \"" - << overloads[0].qualifiedNamespaces("::") - << "\":" << endl; - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, "", vector()); - file.oss << pxdName() + " \"" + overloads[0].qualifiedName("::") + - "\"("; - 
argumentList(i).emit_cython_pxd(file, "", vector()); - file.oss << ")"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx_no_overload(FileWriter& file) const { - string funcName = pyxName(); - - // Function definition - file.oss << "def " << funcName; - - // modify name of function instantiation as python doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // funtion arguments - file.oss << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string ret = pyx_functionCall("", pxdName(), 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx(FileWriter& file) const { - string funcName = pyxName(); - - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file); - return; - } - - // Dealing with overloads.. 
- file.oss << "def " << funcName << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - file.oss << " success, results = " << funcName << "_" << i - << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n"; - - for (size_t i = 0; i < N; ++i) { - ArgumentList args = argumentList(i); - file.oss << "def " + funcName + "_" + to_string(i) + "(args, kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 2); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - // catch exception which indicates the parameters passed are incorrect. - file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - - string call = pyx_functionCall("", pxdName(), i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - } -} -/* ************************************************************************* */ - -} // \namespace wrap - diff --git a/wrap/GlobalFunction.h b/wrap/GlobalFunction.h deleted file mode 100644 index 099cefa70..000000000 --- a/wrap/GlobalFunction.h +++ /dev/null @@ -1,148 +0,0 @@ -/** - * @file GlobalFunction.h - * - * @brief Implements codegen for a global function wrapped in matlab - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include "FullyOverloadedFunction.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#endif -#include -#include -#ifdef __GNUC__ -#pragma GCC 
diagnostic pop -#endif - -namespace bl = boost::lambda; - -namespace wrap { - -struct GlobalFunction: public FullyOverloadedFunction { - - std::vector overloads; ///< Stack of qualified names - std::string includeFile; - - // adds an overloaded version of this function, - void addOverload(const Qualified& overload, const ArgumentList& args, - const ReturnValue& retVal, const std::string& _includeFile = "", boost::optional instName = - boost::none, bool verbose = false); - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // codegen function called from Module to build the cpp and matlab versions of the function - void matlab_proxy(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile) const; - - // function name in Cython pxd - std::string pxdName() const { return "pxd_" + pyRename(name_); } - // function name in Python pyx - std::string pyxName() const { - std::string result = ""; - for(size_t i=0; i= 1) { - result += (overloads[0].namespaces_[i] + "_"); - } - } - result += pyRename(name_); - return result; - } - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile) const; - void emit_cython_pyx(FileWriter& pyxFile) const; - void emit_cython_pyx_no_overload(FileWriter& pyxFile) const; - -private: - - // Creates a single global function - all in same namespace - void generateSingleFunction(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - -}; - -typedef std::map GlobalFunctions; - -/* 
************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct GlobalFunctionGrammar: public classic::grammar { - - GlobalFunctions& global_functions_; ///< successful parse will be placed in here - std::vector& namespaces_; - std::string& includeFile; - - /// Construct type grammar and specify where result is placed - GlobalFunctionGrammar(GlobalFunctions& global_functions, - std::vector& namespaces, - std::string& includeFile) - : global_functions_(global_functions), - namespaces_(namespaces), - includeFile(includeFile) {} - - /// Definition of type grammar - template - struct definition: BasicRules { - -// using BasicRules::name_p; -// using BasicRules::className_p; - using BasicRules::comments_p; - - ArgumentList args; - ArgumentListGrammar argumentList_g; - - ReturnValue retVal0, retVal; - ReturnValueGrammar returnValue_g; - - Qualified globalFunction; - - classic::rule globalFunctionName_p, global_function_p; - - definition(GlobalFunctionGrammar const& self) : - argumentList_g(args), returnValue_g(retVal) { - - using namespace classic; - bool verbose = false; // TODO - - globalFunctionName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - // parse a global function - global_function_p = (returnValue_g >> globalFunctionName_p[assign_a( - globalFunction.name_)] >> - argumentList_g >> ';' >> *comments_p) // - [assign_a(globalFunction.namespaces_, self.namespaces_)] // - [bl::bind( - &GlobalFunction::addOverload, - bl::var(self.global_functions_)[bl::var(globalFunction.name_)], - bl::var(globalFunction), bl::var(args), bl::var(retVal), bl::var(self.includeFile), - boost::none, verbose)] // - [assign_a(retVal, retVal0)][clear_a(globalFunction)][clear_a(args)]; - } - - classic::rule const& start() const { - return global_function_p; - } - - }; -}; -// GlobalFunctionGrammar - -}// \namespace wrap - diff --git a/wrap/Method.cpp b/wrap/Method.cpp deleted file 
mode 100644 index 2a4b0b3af..000000000 --- a/wrap/Method.cpp +++ /dev/null @@ -1,205 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.ccp - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Method::addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName, - bool verbose) { - bool first = MethodBase::addOverload(name, args, retVal, instName, verbose); - if (first) - is_const_ = is_const; - else if (is_const && !is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as const whereas before it was not"); - else if (!is_const && is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as non-const whereas before it was"); - return first; -} - -/* ************************************************************************* */ -void Method::proxy_header(FileWriter& proxyFile) const { - proxyFile.oss << " function varargout = " << matlabName() - << "(this, varargin)\n"; -} - -/* ************************************************************************* */ -string Method::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const { - // check arguments - // extra argument obj -> nargin-1 is passed ! 
- // example: checkArguments("equals",nargout,nargin-1,2); - wrapperFile.oss << " checkArguments(\"" << matlabName() - << "\",nargout,nargin-1," << args.size() << ");\n"; - - // get class pointer - // example: auto obj = unwrap_shared_ptr< Test >(in[0], "Test"); - wrapperFile.oss << " auto obj = unwrap_shared_ptr<" << cppClassName - << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl; - - // unwrap arguments, see Argument.cpp, we start at 1 as first is obj - args.matlab_unwrap(wrapperFile, 1); - - // call method and wrap result - // example: out[0]=wrap(obj->return_field(t)); - string expanded = "obj->" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void Method::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - const string renamed = pyRename(name_); - if (renamed != name_) { - file.oss << pyRename(name_) + " \"" + name_ + "\"" << "("; - } else { - file.oss << name_ << "("; - } - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ")"; - // if (is_const_) file.oss << " const"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - string funcName = pyRename(name_); - - // leverage python's special treatment for print - if (funcName == "print_") { - file.oss << " def __repr__(self):\n"; - file.oss << " strBuf = RedirectCout()\n"; - file.oss << " self.print_('')\n"; - file.oss << " return strBuf.str()\n"; - } - - // Function definition - file.oss << " def " << funcName; - - // modify name of function instantiation as python doesn't allow 
overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // function arguments - file.oss << "(self"; - if (argumentList(0).size() > 0) file.oss << ", "; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." + cls.shared_pxd_obj_in_pyx() + ".get()"; - string ret = pyx_functionCall(caller, funcName, 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx(FileWriter& file, const Class& cls) const { - string funcName = pyRename(name_); - // For template function: modify name of function instantiation as python - // doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - string instantiatedName = - (templateArgValue_) ? funcName + templateArgValue_->pyxClassName() : - funcName; - - size_t N = nrOverloads(); - // It's easy if there's no overload - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads.. 
- file.oss << " def " << instantiatedName << "(self, *args, **kwargs):\n"; - file.oss << " cdef list __params\n"; - - // Define return values for all possible overloads - vector return_type; // every overload has a return type, possibly void - map return_value; // we only define one return value for every distinct type - size_t j = 1; - for (size_t i = 0; i < nrOverloads(); ++i) { - if (returnVals_[i].isVoid()) { - return_type.push_back("void"); - } else { - const string type = returnVals_[i].pyx_returnType(); - return_type.push_back(type); - if (return_value.count(type) == 0) { - const string value = "return_value_" + to_string(j++); - return_value[type] = value; - file.oss << " cdef " << type << " " << value << "\n"; - } - } - } - - for (size_t i = 0; i < nrOverloads(); ++i) { - ArgumentList args = argumentList(i); - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 3); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." 
+ cls.shared_pxd_obj_in_pyx() + ".get()"; - string call = pyx_functionCall(caller, funcName, i); - if (!returnVals_[i].isVoid()) { - const string type = return_type[i]; - const string value = return_value[type]; - file.oss << " " << value << " = " << call << "\n"; - file.oss << " return " << returnVals_[i].pyx_casting(value) - << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return\n"; - } - file.oss << " except (AssertionError, ValueError):\n"; - file.oss << " pass\n"; - } - file.oss - << " raise TypeError('Incorrect arguments or types for method call.')\n\n"; -} -/* ************************************************************************* */ diff --git a/wrap/Method.h b/wrap/Method.h deleted file mode 100644 index 4d3c8d909..000000000 --- a/wrap/Method.h +++ /dev/null @@ -1,74 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.h - * @brief describes and generates code for methods - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "MethodBase.h" - -namespace wrap { - -/// Method class -class Method: public MethodBase { - -protected: - bool is_const_; - -public: - - typedef const std::string& Str; - - bool addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName = boost::none, bool verbose = - false); - - bool isStatic() const override { - return false; - } - - virtual bool isConst() const { - return is_const_; - } - - bool isSameModifiers(const Method& other) const { - return is_const_ == other.is_const_ && - ((templateArgValue_ && other.templateArgValue_) || - (!templateArgValue_ && !other.templateArgValue_)); - } - - friend std::ostream& operator<<(std::ostream& os, const Method& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << m.returnVals_[i] << " " << m.name_ << m.argLists_[i]; - return os; - } - - void emit_cython_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_pyx(FileWriter& file, const Class& cls) const; - void emit_cython_pyx_no_overload(FileWriter& file, const Class& cls) const; - -private: - - // Emit method header - void proxy_header(FileWriter& proxyFile) const override; - - std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const override; -}; - -} // \namespace wrap - diff --git a/wrap/MethodBase.cpp b/wrap/MethodBase.cpp deleted file mode 100644 index a2ed68780..000000000 --- a/wrap/MethodBase.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 
30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void MethodBase::proxy_wrapper_fragments( - FileWriter& proxyFile, FileWriter& wrapperFile, Str cppClassName, - Str matlabQualName, Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - vector& functionNames) const { - // emit header, e.g., function varargout = templatedMethod(this, varargin) - proxy_header(proxyFile); - - // Emit comments for documentation - string up_name = boost::to_upper_copy(matlabName()); - proxyFile.oss << " % " << up_name << " usage: "; - usage_fragment(proxyFile, matlabName()); - - // Emit URL to Doxygen page - proxyFile.oss << " % " - << "Doxygen can be found at " - "http://research.cc.gatech.edu/borg/sites/edu.borg/html/" - "index.html" << endl; - - // Handle special case of single overload with all numeric arguments - if (nrOverloads() == 1 && argumentList(0).allScalar()) { - // Output proxy matlab code - // TODO: document why is it OK to not check arguments in this case - proxyFile.oss << " "; - const int id = (int)functionNames.size(); - emit_call(proxyFile, returnValue(0), wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, 0, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } else { - // Check arguments for all overloads - for (size_t i = 0; i < nrOverloads(); ++i) { - // 
Output proxy matlab code - proxyFile.oss << " " << (i == 0 ? "" : "else"); - const int id = (int)functionNames.size(); - emit_conditional_call(proxyFile, returnValue(i), argumentList(i), - wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, i, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "." << name_ << "');" << endl; - proxyFile.oss << " end\n"; - } - - proxyFile.oss << " end\n"; -} - -/* ************************************************************************* */ -string MethodBase::wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, const TypeAttributesTable& typeAttributes) const { - // generate code - - const string wrapFunctionName = - matlabUniqueName + "_" + name_ + "_" + boost::lexical_cast(id); - - const ArgumentList& args = argumentList(overload); - const ReturnValue& returnVal = returnValue(overload); - - // call - wrapperFile.oss - << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - wrapperFile.oss << "{\n"; - - // get call - // for static methods: cppClassName::staticMethod - // for instance methods: obj->instanceMethod - string expanded = - wrapper_call(wrapperFile, cppClassName, matlabUniqueName, args); - - expanded += ("(" + args.names() + ")"); - if (returnVal.type1.name() != "void") - returnVal.wrap_result(expanded, wrapperFile, typeAttributes); - else - wrapperFile.oss << " " + expanded + ";\n"; - - // finish - wrapperFile.oss << "}\n"; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void MethodBase::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << 
" .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ diff --git a/wrap/MethodBase.h b/wrap/MethodBase.h deleted file mode 100644 index ee72a6a53..000000000 --- a/wrap/MethodBase.h +++ /dev/null @@ -1,70 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.h - * @brief describes and generates code for static methods - * @author Frank Dellaert - * @author Alex Cunningham - * @author Richard Roberts - **/ - -#pragma once - -#include "FullyOverloadedFunction.h" - -namespace wrap { - -// Forward declaration -class Class; - -/// MethodBase class -struct MethodBase : public FullyOverloadedFunction { - typedef const std::string& Str; - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile) const { - SignatureOverloads::comment_fragment(proxyFile, matlabName()); - } - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // MATLAB code generation - // classPath is class directory, e.g., ../matlab/@Point2 - void proxy_wrapper_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str cppClassName, Str matlabQualName, - Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - 
-protected: - virtual void proxy_header(FileWriter& proxyFile) const = 0; - - std::string wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, - const TypeAttributesTable& typeAttributes) const; ///< cpp wrapper - - virtual std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const = 0; -}; - -} // \namespace wrap diff --git a/wrap/Module.cpp b/wrap/Module.cpp deleted file mode 100644 index 780c6f8da..000000000 --- a/wrap/Module.cpp +++ /dev/null @@ -1,649 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.ccp - * @author Frank Dellaert - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Module.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; -using namespace BOOST_SPIRIT_CLASSIC_NS; -namespace bl = boost::lambda; -namespace fs = boost::filesystem; - -/* ************************************************************************* */ -// We parse an interface file into a Module object. -// The grammar is defined using the boost/spirit combinatorial parser. -// For example, str_p("const") parses the string "const", and the >> -// operator creates a sequence parser. The grammar below, composed of rules -// and with start rule [class_p], doubles as the specs for our interface files. 
-/* ************************************************************************* */ - -/* ************************************************************************* */ -// If a number of template arguments were given, generate a number of expanded -// class names, e.g., PriorFactor -> PriorFactorPose2, and add those classes -static void handle_possible_template(vector& classes, - vector& uninstantiatedClasses, - const Class& cls, const Template& t) { - uninstantiatedClasses.push_back(cls); - if (cls.templateArgs.empty() || t.empty()) { - classes.push_back(cls); - } else { - if (cls.templateArgs.size() != 1) - throw std::runtime_error( - "In-line template instantiations only handle a single template argument"); - string arg = cls.templateArgs.front(); - vector classInstantiations = - (t.nrValues() > 0) ? cls.expandTemplate(arg, t.argValues()) : - cls.expandTemplate(arg, t.intList()); - for(const Class& c: classInstantiations) - classes.push_back(c); - } -} - -static void push_typedef_pair(vector& typedefs, - const Qualified& oldType, - const Qualified& newType, - const string& includeFile) { - typedefs.push_back(TypedefPair(oldType, newType, includeFile)); -} - -/* ************************************************************************* */ -Module::Module(const std::string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ -} - -/* ************************************************************************* */ -Module::Module(const string& interfacePath, - const string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ - // read interface file - string interfaceFile = interfacePath + "/" + moduleName + ".h"; - string contents = file_contents(interfaceFile); - - // execute parsing - parseMarkup(contents); -} - -/* ************************************************************************* */ -void Module::parseMarkup(const std::string& data) { - // The parse imperatively :-( updates variables gradually during 
parse - // The one with postfix 0 are used to reset the variables after parse. - - //---------------------------------------------------------------------------- - // Grammar with actions that build the Class object. Actions are - // defined within the square brackets [] and are executed whenever a - // rule is successfully parsed. Define BOOST_SPIRIT_DEBUG to debug. - // The grammar is allows a very restricted C++ header - // lexeme_d turns off white space skipping - // http://www.boost.org/doc/libs/1_37_0/libs/spirit/classic/doc/directives.html - // ---------------------------------------------------------------------------- - - // Define Rule and instantiate basic rules - typedef rule Rule; - BasicRules basic; - - vector namespaces; // current namespace tag - string currentInclude; - - // parse a full class - Class cls0(verbose),cls(verbose); - Template classTemplate; - ClassGrammar class_g(cls,classTemplate); - Rule class_p = class_g // - [assign_a(cls.namespaces_, namespaces)] - [assign_a(cls.includeFile, currentInclude)][bl::bind( - &handle_possible_template, bl::var(classes), - bl::var(uninstantiatedClasses), bl::var(cls), - bl::var(classTemplate))][clear_a(classTemplate)] // - [assign_a(cls, cls0)]; - - // parse "gtsam::Pose2" and add to singleInstantiation.typeList - TemplateInstantiationTypedef singleInstantiation, singleInstantiation0; - TypeListGrammar<'<','>'> typelist_g(singleInstantiation.typeList); - - // typedef gtsam::RangeFactor RangeFactor2D; - TypeGrammar instantiationClass_g(singleInstantiation.class_); - Rule templateSingleInstantiation_p = - (str_p("typedef") >> instantiationClass_g >> - typelist_g >> - basic.className_p[assign_a(singleInstantiation.name_)] >> - ';') - [assign_a(singleInstantiation.namespaces_, namespaces)] - [push_back_a(templateInstantiationTypedefs, singleInstantiation)] - [assign_a(singleInstantiation, singleInstantiation0)]; - - Qualified oldType, newType; - TypeGrammar typedefOldClass_g(oldType), 
typedefNewClass_g(newType); - Rule typedef_p = - (str_p("typedef") >> typedefOldClass_g >> typedefNewClass_g >> - ';') - [assign_a(oldType.namespaces_, namespaces)] - [assign_a(newType.namespaces_, namespaces)] - [bl::bind(&push_typedef_pair, bl::var(typedefs), bl::var(oldType), - bl::var(newType), bl::var(currentInclude))]; - - // Create grammar for global functions - GlobalFunctionGrammar global_function_g(global_functions, namespaces, - currentInclude); - - Rule include_p = str_p("#include") >> ch_p('<') >> - (*(anychar_p - '>'))[push_back_a(includes)] - [assign_a(currentInclude)] >> - ch_p('>'); - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - - Rule namespace_def_p = - (str_p("namespace") - >> basic.namespace_p[push_back_a(namespaces)] - >> ch_p('{') - >> *(include_p | class_p | templateSingleInstantiation_p | typedef_p | global_function_g | namespace_def_p | basic.comments_p) - >> ch_p('}')) - [pop_a(namespaces)]; - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - - // parse forward declaration - ForwardDeclaration fwDec0, fwDec; - Class fwParentClass; - TypeGrammar className_g(fwDec.cls); - TypeGrammar classParent_g(fwParentClass); - Rule classParent_p = (':' >> classParent_g >> ';') // - [bl::bind(&Class::assignParent, bl::var(fwDec.cls), - bl::var(fwParentClass))][clear_a(fwParentClass)]; - - Rule forward_declaration_p = - !(str_p("virtual")[assign_a(fwDec.isVirtual, T)]) - >> str_p("class") >> className_g - >> (classParent_p | ';') - [push_back_a(forward_declarations, fwDec)] - [assign_a(cls,cls0)] // also clear class to avoid partial parse - [assign_a(fwDec, fwDec0)]; - - Rule module_content_p = basic.comments_p | include_p | class_p - | templateSingleInstantiation_p | forward_declaration_p - | global_function_g | namespace_def_p; - - Rule module_p = *module_content_p >> !end_p; - - // and parse contents - parse_info info = parse(data.c_str(), module_p, space_p); - if(!info.full) { 
- printf("parsing stopped at \n%.20s\n",info.stop); - cout << "Stopped in:\n" - "class '" << cls.name_ << "'" << endl; - throw ParseFailed((int)info.length); - } - - // Post-process classes for serialization markers - for(Class& cls: classes) - cls.erase_serialization(); - - for(Class& cls: uninstantiatedClasses) - cls.erase_serialization(); - - // Explicitly add methods to the classes from parents so it shows in documentation - for(Class& cls: classes) - cls.appendInheritedMethods(cls, classes); - - // - Remove inherited methods for Cython classes in the pxd, otherwise Cython can't decide which one to call. - // - Only inherited nontemplateMethods_ in uninstantiatedClasses need to be removed - // because that what we serialized to the pxd. - // - However, we check against the class parent's *methods_* to avoid looking into - // its grand parent and grand-grand parent, etc., because all those are already - // added in its direct parent. - // - So this must be called *after* the above code appendInheritedMethods!! - for(Class& cls: uninstantiatedClasses) - cls.removeInheritedNontemplateMethods(uninstantiatedClasses); - - // Expand templates - This is done first so that template instantiations are - // counted in the list of valid types, have their attributes and dependencies - // checked, etc. 
- expandedClasses = ExpandTypedefInstantiations(classes, - templateInstantiationTypedefs); - - // Dependency check list - vector validTypes = GenerateValidTypes(expandedClasses, - forward_declarations, typedefs); - - // Check that all classes have been defined somewhere - verifyArguments(validTypes, global_functions); - verifyReturnTypes(validTypes, global_functions); - - hasSerialiable = false; - for(const Class& cls: expandedClasses) - cls.verifyAll(validTypes,hasSerialiable); - - // Create type attributes table and check validity - typeAttributes.addClasses(expandedClasses); - typeAttributes.addForwardDeclarations(forward_declarations); - for (const TypedefPair& p: typedefs) - typeAttributes.addType(p.newType); - // add Eigen types as template arguments are also checked ? - vector eigen; - eigen.push_back(ForwardDeclaration("Vector")); - eigen.push_back(ForwardDeclaration("Matrix")); - typeAttributes.addForwardDeclarations(eigen); - typeAttributes.checkValidity(expandedClasses); -} - -/* ************************************************************************* */ -void Module::generate_matlab_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_wrapper"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "\n"; - - // Include boost.serialization archive headers before other class headers - if (hasSerialiable) { - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n\n"; - } - - // Generate includes while avoiding redundant includes - generateIncludes(wrapperFile); - - // create typedef classes - we put this at the top of the wrap file so that - // collectors and method arguments can use these typedefs - for(const Class& cls: 
expandedClasses) - if(!cls.typedefName.empty()) - wrapperFile.oss << cls.getTypedef() << "\n"; - wrapperFile.oss << "\n"; - - // Generate boost.serialization export flags (needs typedefs from above) - if (hasSerialiable) { - for(const Class& cls: expandedClasses) - if(cls.isSerializable) - wrapperFile.oss << cls.getSerializationExport() << "\n"; - wrapperFile.oss << "\n"; - } - - // Generate collectors and cleanup function to be called from mexAtExit - WriteCollectorsAndCleanupFcn(wrapperFile, name, expandedClasses); - - // generate RTTI registry (for returning derived-most types) - WriteRTTIRegistry(wrapperFile, name, expandedClasses); - - vector functionNames; // Function names stored by index for switch - - // create proxy class and wrapper code - for(const Class& cls: expandedClasses) - cls.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // create matlab files and wrapper code for global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // finish wrapper file - wrapperFile.oss << "\n"; - finish_wrapper(wrapperFile, functionNames); - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generate_cython_wrapper(const string& toolboxPath, const std::string& pxdImports) const { - fs::create_directories(toolboxPath); - string pxdFileName = toolboxPath + "/" + name + ".pxd"; - FileWriter pxdFile(pxdFileName, verbose, "#"); - pxdFile.oss << pxdImports << "\n"; - emit_cython_pxd(pxdFile); - string pyxFileName = toolboxPath + "/" + name + ".pyx"; - FileWriter pyxFile(pyxFileName, verbose, "#"); - emit_cython_pyx(pyxFile); -} - -/* ************************************************************************* */ -void Module::emit_cython_pxd(FileWriter& pxdFile) const { - // headers - pxdFile.oss << "from gtsam_eigency.core cimport *\n" - "from 
libcpp.string cimport string\n" - "from libcpp.vector cimport vector\n" - "from libcpp.pair cimport pair\n" - "from libcpp.set cimport set\n" - "from libcpp.map cimport map\n" - "from libcpp cimport bool\n\n"; - - // boost shared_ptr - pxdFile.oss << "cdef extern from \"boost/shared_ptr.hpp\" namespace \"boost\":\n" - " cppclass shared_ptr[T]:\n" - " shared_ptr()\n" - " shared_ptr(T*)\n" - " T* get()\n" - " long use_count() const\n" - " T& operator*()\n\n" - " cdef shared_ptr[T] dynamic_pointer_cast[T,U](const shared_ptr[U]& r)\n\n"; - - // gtsam alignment-friendly shared_ptr - pxdFile.oss << "cdef extern from \"gtsam/base/make_shared.h\" namespace \"gtsam\":\n" - " cdef shared_ptr[T] make_shared[T](const T& r)\n\n"; - - for(const TypedefPair& types: typedefs) - types.emit_cython_pxd(pxdFile); - - //... wrap all classes - for (const Class& cls : uninstantiatedClasses) { - cls.emit_cython_pxd(pxdFile); - - for (const Class& expCls : expandedClasses) { - bool matchingNonTemplated = !expCls.templateClass - && expCls.pxdClassName() == cls.pxdClassName(); - bool isTemplatedFromCls = expCls.templateClass - && expCls.templateClass->pxdClassName() == cls.pxdClassName(); - - // ctypedef for template instantiations - if (isTemplatedFromCls) { - pxdFile.oss << "\n"; - pxdFile.oss << "ctypedef " << expCls.templateClass->pxdClassName() - << "["; - for (size_t i = 0; i < expCls.templateInstTypeList.size(); ++i) - pxdFile.oss << expCls.templateInstTypeList[i].pxdClassName() - << ((i == expCls.templateInstTypeList.size() - 1) ? "" : ", "); - pxdFile.oss << "] " << expCls.pxdClassName() << "\n"; - } - - // Python wrapper class - if (isTemplatedFromCls || matchingNonTemplated) { - expCls.emit_cython_wrapper_pxd(pxdFile); - } - } - pxdFile.oss << "\n\n"; - } - - //... 
wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pxd(pxdFile); - - pxdFile.emit(true); -} - -/* ************************************************************************* */ -void Module::emit_cython_pyx(FileWriter& pyxFile) const { - // directives... - // allow str to automatically coerce to std::string and back (for python3) - pyxFile.oss << "# cython: c_string_type=str, c_string_encoding=ascii\n\n"; - - // headers... - string pxdHeader = name; - pyxFile.oss << "cimport numpy as np\n" - "import numpy as npp\n" - "cimport " << pxdHeader << "\n" - "from ."<< pxdHeader << " cimport shared_ptr\n" - "from ."<< pxdHeader << " cimport dynamic_pointer_cast\n" - "from ."<< pxdHeader << " cimport make_shared\n"; - - pyxFile.oss << "# C helper function that copies all arguments into a positional list.\n" - "cdef list process_args(list keywords, tuple args, dict kwargs):\n" - " cdef str keyword\n" - " cdef int n = len(args), m = len(keywords)\n" - " cdef list params = list(args)\n" - " assert len(args)+len(kwargs) == m, 'Expected {} arguments'.format(m)\n" - " try:\n" - " return params + [kwargs[keyword] for keyword in keywords[n:]]\n" - " except:\n" - " raise ValueError('Epected arguments ' + str(keywords))\n"; - - // import all typedefs, e.g. 
from gtsam_wrapper cimport Key, so we don't need to say gtsam.Key - for(const Qualified& q: Qualified::BasicTypedefs) { - pyxFile.oss << "from " << pxdHeader << " cimport " << q.pxdClassName() << "\n"; - } - pyxFile.oss << "from gtsam_eigency.core cimport *\n" - "from libcpp cimport bool\n\n" - "from libcpp.pair cimport pair\n" - "from libcpp.string cimport string\n" - "from cython.operator cimport dereference as deref\n\n\n"; - - // all classes include all forward declarations - std::vector allClasses = expandedClasses; - for(const ForwardDeclaration& fd: forward_declarations) - allClasses.push_back(fd.cls); - - for(const Class& cls: expandedClasses) - cls.emit_cython_pyx(pyxFile, allClasses); - pyxFile.oss << "\n"; - - //... wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pyx(pyxFile); - pyxFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generateIncludes(FileWriter& file) const { - - // collect includes - vector all_includes(includes); - - // sort and remove duplicates - sort(all_includes.begin(), all_includes.end()); - vector::const_iterator last_include = unique(all_includes.begin(), all_includes.end()); - vector::const_iterator it = all_includes.begin(); - // add includes to file - for (; it != last_include; ++it) - file.oss << "#include <" << *it << ">" << endl; - file.oss << "\n"; -} - - -/* ************************************************************************* */ - void Module::finish_wrapper(FileWriter& file, const std::vector& functionNames) const { - file.oss << "void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - file.oss << "{\n"; - file.oss << " mstream mout;\n"; // Send stdout to MATLAB console - file.oss << " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n"; - file.oss << " _" << name << "_RTTIRegister();\n\n"; - file.oss << " int id = unwrap(in[0]);\n\n"; - file.oss << " try {\n"; - 
file.oss << " switch(id) {\n"; - for(size_t id = 0; id < functionNames.size(); ++id) { - file.oss << " case " << id << ":\n"; - file.oss << " " << functionNames[id] << "(nargout, out, nargin-1, in+1);\n"; - file.oss << " break;\n"; - } - file.oss << " }\n"; - file.oss << " } catch(const std::exception& e) {\n"; - file.oss << " mexErrMsgTxt((\"Exception from gtsam:\\n\" + std::string(e.what()) + \"\\n\").c_str());\n"; - file.oss << " }\n"; - file.oss << "\n"; - file.oss << " std::cout.rdbuf(outbuf);\n"; // Restore cout - file.oss << "}\n"; - } - -/* ************************************************************************* */ -vector Module::ExpandTypedefInstantiations(const vector& classes, const vector instantiations) { - - vector expandedClasses = classes; - - for(const TemplateInstantiationTypedef& inst: instantiations) { - // Add the new class to the list - expandedClasses.push_back(inst.findAndExpand(classes)); - } - - // Remove all template classes - for(size_t i = 0; i < expandedClasses.size(); ++i) - if(!expandedClasses[i].templateArgs.empty()) { - expandedClasses.erase(expandedClasses.begin() + size_t(i)); - -- i; - } - - return expandedClasses; -} - -/* ************************************************************************* */ -vector Module::GenerateValidTypes(const vector& classes, const vector& forwardDeclarations, const vector& typedefs) { - vector validTypes; - for(const ForwardDeclaration& fwDec: forwardDeclarations) { - validTypes.push_back(fwDec.name()); - } - validTypes.push_back("void"); - validTypes.push_back("string"); - validTypes.push_back("int"); - validTypes.push_back("bool"); - validTypes.push_back("char"); - validTypes.push_back("unsigned char"); - validTypes.push_back("size_t"); - validTypes.push_back("double"); - validTypes.push_back("Vector"); - validTypes.push_back("Matrix"); - //Create a list of parsed classes for dependency checking - for(const Class& cls: classes) { - validTypes.push_back(cls.qualifiedName("::")); - } - for(const 
TypedefPair& p: typedefs) { - validTypes.push_back(p.newType.qualifiedName("::")); - } - - return validTypes; -} - -/* ************************************************************************* */ -void Module::WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, const std::string& moduleName, const std::vector& classes) { - // Generate all collectors - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(), - cppName = cls.qualifiedName("::"); - wrapperFile.oss << "typedef std::set*> " - << "Collector_" << matlabUniqueName << ";\n"; - wrapperFile.oss << "static Collector_" << matlabUniqueName << - " collector_" << matlabUniqueName << ";\n"; - } - - // generate mexAtExit cleanup function - wrapperFile.oss << - "\nvoid _deleteAllObjects()\n" - "{\n" - " mstream mout;\n" // Send stdout to MATLAB console - " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n" - " bool anyDeleted = false;\n"; - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(); - const string cppName = cls.qualifiedName("::"); - const string collectorType = "Collector_" + matlabUniqueName; - const string collectorName = "collector_" + matlabUniqueName; - // The extra curly-braces around the for loops work around a limitation in MSVC (existing - // since 2005!) preventing more than 248 blocks. 
- wrapperFile.oss << - " { for(" << collectorType << "::iterator iter = " << collectorName << ".begin();\n" - " iter != " << collectorName << ".end(); ) {\n" - " delete *iter;\n" - " " << collectorName << ".erase(iter++);\n" - " anyDeleted = true;\n" - " } }\n"; - } - wrapperFile.oss << - " if(anyDeleted)\n" - " cout <<\n" - " \"WARNING: Wrap modules with variables in the workspace have been reloaded due to\\n\"\n" - " \"calling destructors, call 'clear all' again if you plan to now recompile a wrap\\n\"\n" - " \"module, so that your recompiled module is used instead of the old one.\" << endl;\n" - " std::cout.rdbuf(outbuf);\n" // Restore cout - "}\n\n"; -} - -/* ************************************************************************* */ -void Module::WriteRTTIRegistry(FileWriter& wrapperFile, const std::string& moduleName, const std::vector& classes) { - wrapperFile.oss << - "void _" << moduleName << "_RTTIRegister() {\n" - " const mxArray *alreadyCreated = mexGetVariablePtr(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\");\n" - " if(!alreadyCreated) {\n" - " std::map types;\n"; - for(const Class& cls: classes) { - if(cls.isVirtual) - wrapperFile.oss << - " types.insert(std::make_pair(typeid(" << cls.qualifiedName("::") << ").name(), \"" << cls.qualifiedName(".") << "\"));\n"; - } - wrapperFile.oss << "\n"; - - wrapperFile.oss << - " mxArray *registry = mexGetVariable(\"global\", \"gtsamwrap_rttiRegistry\");\n" - " if(!registry)\n" - " registry = mxCreateStructMatrix(1, 1, 0, NULL);\n" - " typedef std::pair StringPair;\n" - " for(const StringPair& rtti_matlab: types) {\n" - " int fieldId = mxAddField(registry, rtti_matlab.first.c_str());\n" - " if(fieldId < 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxArray *matlabName = mxCreateString(rtti_matlab.second.c_str());\n" - " mxSetFieldByNumber(registry, 0, fieldId, matlabName);\n" - " }\n" - " if(mexPutVariable(\"global\", 
\"gtsamwrap_rttiRegistry\", registry) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxDestroyArray(registry);\n" - " \n" - " mxArray *newAlreadyCreated = mxCreateNumericMatrix(0, 0, mxINT8_CLASS, mxREAL);\n" - " if(mexPutVariable(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\", newAlreadyCreated) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxDestroyArray(newAlreadyCreated);\n" - " }\n" - "}\n" - "\n"; -} - -/* ************************************************************************* */ -void Module::generate_python_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_python"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n\n"; - wrapperFile.oss << "using namespace boost::python;\n"; - wrapperFile.oss << "BOOST_PYTHON_MODULE(" + name + ")\n"; - wrapperFile.oss << "{\n"; - - // write out classes - for(const Class& cls: expandedClasses) { - cls.python_wrapper(wrapperFile); - } - - // write out global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.python_wrapper(wrapperFile); - - // finish wrapper file - wrapperFile.oss << "}\n"; - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ diff --git a/wrap/Module.h b/wrap/Module.h deleted file mode 100644 index 2a8344551..000000000 --- a/wrap/Module.h +++ /dev/null @@ -1,95 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.h - * @brief describes module to be wrapped - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "Class.h" -#include "GlobalFunction.h" -#include "TemplateInstantiationTypedef.h" -#include "ForwardDeclaration.h" -#include "TypedefPair.h" - -#include -#include -#include - -namespace wrap { - -/** - * A module just has a name and a list of classes - */ -struct Module { - - // Filled during parsing: - std::string name; ///< module name - bool verbose; ///< verbose flag - std::vector classes; ///< list of classes - std::vector uninstantiatedClasses; ///< list of template classes after instantiated - std::vector templateInstantiationTypedefs; ///< list of template instantiations - std::vector forward_declarations; - std::vector includes; ///< Include statements - GlobalFunctions global_functions; - std::vector typedefs; - - // After parsing: - std::vector expandedClasses; - bool hasSerialiable; - TypeAttributesTable typeAttributes; - - /// constructor that parses interface file - Module(const std::string& interfacePath, const std::string& moduleName, - bool enable_verbose = true); - - /// Dummy constructor that does no parsing - use only for testing - Module(const std::string& moduleName, bool enable_verbose = true); - - /// non-const function that performs parsing - typically called by constructor - /// Throws exception on failure - void parseMarkup(const std::string& data); - - /// MATLAB code generation: - void generate_matlab_wrapper(const std::string& path) const; - - /// Cython code generation: - void generate_cython_wrapper(const std::string& path, const std::string& pxdImports = "") const; - void emit_cython_pxd(FileWriter& file) const; - void emit_cython_pyx(FileWriter& file) const; - - void generateIncludes(FileWriter& file) const; - - 
void finish_wrapper(FileWriter& file, - const std::vector& functionNames) const; - - /// Python code generation: - void generate_python_wrapper(const std::string& path) const; - -private: - static std::vector ExpandTypedefInstantiations( - const std::vector& classes, - const std::vector instantiations); - static std::vector GenerateValidTypes( - const std::vector& classes, - const std::vector& forwardDeclarations, - const std::vector& typedefs); - static void WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); - static void WriteRTTIRegistry(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); -}; - -} // \namespace wrap diff --git a/wrap/OverloadedFunction.h b/wrap/OverloadedFunction.h deleted file mode 100644 index 6bcb72d94..000000000 --- a/wrap/OverloadedFunction.h +++ /dev/null @@ -1,140 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file OverloadedFunction.h - * @brief Function that can overload its arguments only - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Function.h" -#include "Argument.h" -#include -namespace wrap { - -/** - * ArgumentList Overloads - */ -class ArgumentOverloads { -public: - std::vector argLists_; - -public: - size_t nrOverloads() const { return argLists_.size(); } - - const ArgumentList& argumentList(size_t i) const { return argLists_.at(i); } - - void push_back(const ArgumentList& args) { argLists_.push_back(args); } - - std::vector expandArgumentListsTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for (const ArgumentList& argList : argLists_) { - ArgumentList instArgList = argList.expandTemplate(ts); - result.push_back(instArgList); - } - return result; - } - - /// Expand templates, imperative ! 
- virtual void ExpandTemplate(const TemplateSubstitution& ts) { - argLists_ = expandArgumentListsTemplate(ts); - } - - void verifyArguments(const std::vector& validArgs, - const std::string s) const { - for (const ArgumentList& argList : argLists_) { - for (Argument arg : argList) { - std::string fullType = arg.type.qualifiedName("::"); - if (find(validArgs.begin(), validArgs.end(), fullType) == - validArgs.end()) - throw DependencyMissing(fullType, "checking argument of " + s); - } - } - } - - friend std::ostream& operator<<(std::ostream& os, - const ArgumentOverloads& overloads) { - for (const ArgumentList& argList : overloads.argLists_) - os << argList << std::endl; - return os; - } - - std::string pyx_resolveOverloadParams(const ArgumentList& args, bool isVoid, - size_t indentLevel = 2) const { - std::string indent; - for (size_t i = 0; i < indentLevel; ++i) - indent += " "; - std::string s; - s += indent + "__params = process_args([" + args.pyx_paramsList() - + "], args, kwargs)\n"; - s += args.pyx_castParamsToPythonType(indent); - if (args.size() > 0) { - for (size_t i = 0; i < args.size(); ++i) { - // For python types we can do the assert after the assignment and save list accesses - if (args[i].type.isNonBasicType() || args[i].type.isEigen()) { - std::string param = args[i].name; - s += indent + "assert isinstance(" + param + ", " - + args[i].type.pyxArgumentType() + ")"; - if (args[i].type.isEigen()) { - s += " and " + param + ".ndim == " - + ((args[i].type.pyxClassName() == "Vector") ? 
"1" : "2"); - } - s += "\n"; - } - } - } - return s; - } -}; - -class OverloadedFunction : public Function, public ArgumentOverloads { -public: - bool addOverload(const std::string& name, const ArgumentList& args, - boost::optional instName = boost::none, - bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - ArgumentOverloads::push_back(args); - return first; - } - -private: -}; - -// Templated checking functions -// TODO: do this via polymorphism, use transform ? - -template -static std::map expandMethodTemplate( - const std::map& methods, const TemplateSubstitution& ts) { - std::map result; - typedef std::pair NamedMethod; - for (NamedMethod namedMethod : methods) { - F instMethod = namedMethod.second; - instMethod.expandTemplate(ts); - namedMethod.second = instMethod; - result.insert(namedMethod); - } - return result; -} - -template -inline void verifyArguments(const std::vector& validArgs, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for (const NamedMethod& namedMethod : vt) - namedMethod.second.verifyArguments(validArgs); -} - -} // \namespace wrap diff --git a/wrap/Qualified.cpp b/wrap/Qualified.cpp deleted file mode 100644 index 947e51d54..000000000 --- a/wrap/Qualified.cpp +++ /dev/null @@ -1,5 +0,0 @@ -#include - -namespace wrap { - std::vector Qualified::BasicTypedefs; -} diff --git a/wrap/Qualified.h b/wrap/Qualified.h deleted file mode 100644 index 416db239d..000000000 --- a/wrap/Qualified.h +++ /dev/null @@ -1,370 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Qualified.h - * @brief Qualified name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include -#include -#include -#include - -namespace wrap { - -/** - * Class to encapuslate a qualified name, i.e., with (nested) namespaces - */ -class Qualified { - -//protected: -public: - - std::vector namespaces_; ///< Stack of namespaces - std::string name_; ///< type name - static std::vector BasicTypedefs; - - friend struct TypeGrammar; - friend class TemplateSubstitution; - -public: - - /// the different categories - typedef enum { - CLASS = 1, EIGEN = 2, BASIS = 3, VOID = 4 - } Category; - Category category; - - /// Default constructor - Qualified() : - category(VOID) { - } - - /// Construct from name and optional category - Qualified(const std::string& n, Category c = CLASS) : - name_(n), category(c) { - } - - /// Construct from scoped name and optional category - Qualified(const std::string& ns1, const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - } - - /// Construct from doubly scoped name and optional category - Qualified(const std::string& ns1, const std::string& ns2, - const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - namespaces_.push_back(ns2); - } - - /// Construct from arbitrarily scoped name - Qualified(std::vector ns, const std::string& name) : - namespaces_(ns), name_(name), category(CLASS) { - } - - // Destructor - virtual ~Qualified() {} - - std::string name() const { - return name_; - } - - std::vector namespaces() const { - return namespaces_; - } - - // Qualified is 'abused' as template argument name as well - // this function checks whether *this matches with templateArg - bool match(const std::string& templateArg) const { - return (name_ == 
templateArg && namespaces_.empty()); //TODO && category == CLASS); - } - - bool match(const std::vector& templateArgs) const { - for(const std::string& s: templateArgs) - if (match(s)) return true; - return false; - } - - void rename(const Qualified& q) { - namespaces_ = q.namespaces_; - name_ = q.name_; - category = q.category; - } - - void expand(const std::string& expansion) { - name_ += expansion; - } - - bool operator==(const Qualified& other) const { - return namespaces_ == other.namespaces_ && name_ == other.name_ - && category == other.category; - } - - bool empty() const { - return namespaces_.empty() && name_.empty(); - } - - virtual void clear() { - namespaces_.clear(); - name_.clear(); - category = VOID; - } - - bool isScalar() const { - return (name() == "bool" || name() == "char" - || name() == "unsigned char" || name() == "int" - || name() == "size_t" || name() == "double"); - } - - bool isVoid() const { - return name() == "void"; - } - - bool isString() const { - return name() == "string"; - } - - bool isEigen() const { - return name() == "Vector" || name() == "Matrix"; - } - - bool isBasicTypedef() const { - return std::find(Qualified::BasicTypedefs.begin(), - Qualified::BasicTypedefs.end(), - *this) != Qualified::BasicTypedefs.end(); - } - - bool isNonBasicType() const { - return name() != "This" && !isString() && !isScalar() && !isEigen() && - !isVoid() && !isBasicTypedef(); - } - -public: - - static Qualified MakeClass(std::vector namespaces, - const std::string& name) { - return Qualified(namespaces, name); - } - - static Qualified MakeEigen(const std::string& name) { - return Qualified(name, EIGEN); - } - - static Qualified MakeBasis(const std::string& name) { - return Qualified(name, BASIS); - } - - static Qualified MakeVoid() { - return Qualified("void", VOID); - } - - /// Return a qualified namespace using given delimiter - std::string qualifiedNamespaces(const std::string& delimiter = "") const { - std::string result; - for (std::size_t i 
= 0; i < namespaces_.size(); ++i) - result += (namespaces_[i] + ((i VectorXd, Matrix --> MatrixXd - std::string pxdClassName() const { - if (isEigen()) - return name_ + "Xd"; - else if (isNonBasicType()) - return "C" + qualifiedName("_", 1); - else return name_; - } - - /// name of Python classes in pyx - /// They have the same name with the corresponding Cython classes in pxd - /// But note that they are different: These are Python classes in the pyx file - /// To refer to a Cython class in pyx, we need to add "pxd.", e.g. pxd.noiseModel_Gaussian - /// see the other function pxd_class_in_pyx for that purpose. - std::string pyxClassName() const { - if (isEigen()) - return name_; - else - return qualifiedName("_", 1); - } - - /// Python type of function arguments in pyx to interface with normal python scripts - /// Eigen types become np.ndarray (There's no Eigen types, e.g. VectorXd, in - /// Python. We have to pass in numpy array in the arguments, which will then be - /// converted to Eigen types in Cython) - std::string pyxArgumentType() const { - if (isEigen()) - return "np.ndarray"; - else - return qualifiedName("_", 1); - } - - /// return the Cython class in pxd corresponding to a Python class in pyx - std::string pxd_class_in_pyx() const { - if (isNonBasicType()) { - return pxdClassName(); - } else if (isEigen()) { - return name_ + "Xd"; - } else // basic types and not Eigen - return name_; - } - - /// the internal Cython shared obj in a Python class wrappper - std::string shared_pxd_obj_in_pyx() const { - return pxdClassName() + "_"; - } - - std::string make_shared_pxd_class_in_pyx() const { - return "make_shared[" + pxd_class_in_pyx() + "]"; - } - - std::string shared_pxd_class_in_pyx() const { - return "shared_ptr[" + pxd_class_in_pyx() + "]"; - } - - friend std::ostream& operator<<(std::ostream& os, const Qualified& q) { - os << q.qualifiedName("::"); - return os; - } -}; - -/* ************************************************************************* */ 
-// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TypeGrammar: classic::grammar { - - wrap::Qualified& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeGrammar(wrap::Qualified& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule void_p, basisType_p, eigenType_p, namespace_del_p, class_p, type_p; - - definition(TypeGrammar const& self) { - - using namespace wrap; - using namespace classic; - typedef BasicRules Basic; - - // HACK: use const values instead of using enums themselves - somehow this doesn't result in values getting assigned to gibberish - static const Qualified::Category EIGEN = Qualified::EIGEN; - static const Qualified::Category BASIS = Qualified::BASIS; - static const Qualified::Category CLASS = Qualified::CLASS; - static const Qualified::Category VOID = Qualified::VOID; - - void_p = str_p("void") // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, VOID)]; - - basisType_p = Basic::basisType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, BASIS)]; - - eigenType_p = Basic::eigenType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, EIGEN)]; - - namespace_del_p = Basic::namespace_p // - [push_back_a(self.result_.namespaces_)] >> str_p("::"); - - class_p = *namespace_del_p >> Basic::className_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, CLASS)]; - - type_p = void_p | basisType_p | class_p | eigenType_p; - } - - Rule const& start() const { - return type_p; - } - - }; -}; -// type_grammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -template -struct TypeListGrammar: public classic::grammar > { - - typedef std::vector 
TypeList; - TypeList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeListGrammar(TypeList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - wrap::Qualified type; ///< temporary for use during parsing - TypeGrammar type_g; ///< Individual Type grammars - - classic::rule type_p, typeList_p; - - definition(TypeListGrammar const& self) : - type_g(type) { - using namespace classic; - - type_p = type_g[push_back_a(self.result_, type)][clear_a(type)]; - - typeList_p = OPEN >> !type_p >> *(',' >> type_p) >> CLOSE; - } - - classic::rule const& start() const { - return typeList_p; - } - - }; -}; -// TypeListGrammar - -/* ************************************************************************* */ -// Needed for other parsers in Argument.h and ReturnType.h -static const bool T = true; - -} // \namespace wrap - diff --git a/wrap/README.md b/wrap/README.md index 014577b5a..f72c3f652 100644 --- a/wrap/README.md +++ b/wrap/README.md @@ -1,27 +1,76 @@ -# WRAP README -The wrap library wraps the GTSAM library into a MATLAB toolbox. +# WRAP -It was designed to be more general than just wrapping GTSAM, but a small amount of GTSAM specific code exists in matlab.h, the include file that is included by the mex files. The GTSAM-specific functionality consists primarily of handling of Eigen Matrix and Vector classes. +The wrap library wraps the GTSAM library into a Python library or MATLAB toolbox. +It was designed to be more general than just wrapping GTSAM. For notes on creating a wrap interface, see `gtsam.h` for what features can be wrapped into a toolbox, as well as the current state of the toolbox for GTSAM. -For notes on creating a wrap interface, see gtsam.h for what features can be wrapped into a toolbox, as well as the current state of the toolbox for gtsam. 
For more technical details on the interface, please read comments in matlab.h +## Prerequisites: Pybind11 and pyparsing + +1. This library uses `pybind11`, which is included as a subdirectory in GTSAM. +2. The `interface_parser.py` in this library uses `pyparsing` to parse the interface file `gtsam.h`. Please install it first in your current Python environment before attempting the build. + ``` + python3 -m pip install pyparsing + ``` + +## GTSAM Python wrapper + +**WARNING: On macOS, you have to statically build GTSAM to use the wrapper.** + +1. Set `GTSAM_BUILD_PYTHON=ON` while configuring the build with `cmake`. +1. What you can do in the `build` folder: + 1. Just run python then import GTSAM and play around: + ``` + + import gtsam + gtsam.__dir__() + ``` + + 1. Run the unittests: + ``` + python -m unittest discover + ``` + 1. Edit the unittests in `python/gtsam/*.py` and simply rerun the test. + They were symlinked to `/gtsam/*.py` to facilitate fast development. + ``` + python -m unittest gtsam/tests/test_Pose3.py + ``` + - NOTE: You might need to re-run `cmake ..` if files are deleted or added. +1. Do `make install` and `cd /python`. Here, you can: + 1. Run the unittests: + ``` + python setup.py test + ``` + 2. Install `gtsam` to your current Python environment. + ``` + python setup.py install + ``` + - NOTE: It's a good idea to create a virtual environment otherwise it will be installed in your system Python's site-packages. + + +## Old GTSAM Wrapper + +*Outdated note from the original wrap.* + +TODO: Update this. + +It was designed to be more general than just wrapping GTSAM, but a small amount of GTSAM specific code exists in `matlab.h`, the include file that is included by the `mex` files. The GTSAM-specific functionality consists primarily of handling of Eigen Matrix and Vector classes. + +For notes on creating a wrap interface, see `gtsam.h` for what features can be wrapped into a toolbox, as well as the current state of the toolbox for GTSAM. 
For more technical details on the interface, please read comments in `matlab.h` Some good things to know: OBJECT CREATION -- Classes are created by special constructors, e.g., new_GaussianFactorGraph_.cpp. - These constructors are called from the MATLAB class @GaussianFactorGraph. - new_GaussianFactorGraph_ calls wrap_constructed in matlab.h, see documentation there - +- Classes are created by special constructors, e.g., `new_GaussianFactorGraph_.cpp`. + These constructors are called from the MATLAB class `@GaussianFactorGraph`. + `new_GaussianFactorGraph_` calls wrap_constructed in `matlab.h`, see documentation there + METHOD (AND CONSTRUCTOR) ARGUMENTS - Simple argument types of methods, such as "double", will be converted in the - mex wrappers by calling unwrap, defined in matlab.h + `mex` wrappers by calling unwrap, defined in matlab.h - Vector and Matrix arguments are normally passed by reference in GTSAM, but - in gtsam.h you need to pretend they are passed by value, to trigger the - generation of the correct conversion routines unwrap and unwrap + in `gtsam.h` you need to pretend they are passed by value, to trigger the + generation of the correct conversion routines `unwrap` and `unwrap` - passing classes as arguments works, provided they are passed by reference. 
This triggers a call to unwrap_shared_ptr - - \ No newline at end of file diff --git a/wrap/ReturnType.cpp b/wrap/ReturnType.cpp deleted file mode 100644 index fdf86d975..000000000 --- a/wrap/ReturnType.cpp +++ /dev/null @@ -1,101 +0,0 @@ -/** - * @file ReturnType.cpp - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "ReturnType.h" -#include "Class.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void ReturnType::wrap_result(const string& out, const string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - string cppType = qualifiedName("::"), matlabType = qualifiedName("."); - - if (category == CLASS) { - // Handle Classes - string objCopy, ptrType; - const bool isVirtual = typeAttributes.attributes(cppType).isVirtual; - if (isPtr) - objCopy = result; // a shared pointer can always be passed as is - else { - // but if we want an actual new object, things get more complex - if (isVirtual) - // A virtual class needs to be cloned, so the whole hierarchy is - // returned - objCopy = result + ".clone()"; - else { - // ...but a non-virtual class can just be copied - objCopy = "boost::make_shared<" + cppType + ">(" + result + ")"; - } - } - // e.g. out[1] = wrap_shared_ptr(pairResult.second,"gtsam.Point3", false); - wrapperFile.oss << out << " = wrap_shared_ptr(" << objCopy << ",\"" - << matlabType << "\", " << (isVirtual ? "true" : "false") - << ");\n"; - - } else if (isPtr) { - // Handle shared pointer case for BASIS/EIGEN/VOID - // This case does not actually occur in GTSAM wrappers, so untested! 
- wrapperFile.oss << " {\n boost::shared_ptr<" << qualifiedName("::") - << "> shared(" << result << ");" << endl; - wrapperFile.oss << out << " = wrap_shared_ptr(shared,\"" << matlabType - << "\");\n }\n"; - - } else if (matlabType != "void") - // Handle normal case case for BASIS/EIGEN - wrapperFile.oss << out << " = wrap< " << qualifiedName("::") << " >(" << result - << ");\n"; -} - -/* ************************************************************************* */ -void ReturnType::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType; - if (name() == "This") - cythonType = className; - else if (match(templateArgs)) - cythonType = name(); - else - cythonType = pxdClassName(); - if (isPtr) cythonType = "shared_ptr[" + cythonType + "]"; - file.oss << cythonType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_returnType(bool addShared) const { - string retType = pxd_class_in_pyx(); - if (isPtr || (isNonBasicType() && addShared)) - retType = "shared_ptr[" + retType + "]"; - return retType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_casting(const std::string& var, - bool isSharedVar) const { - if (isEigen()) { - string s = "ndarray_copy(" + var + ")"; - if (pyxClassName() == "Vector") - return s + ".squeeze()"; - else return s; - } - else if (isNonBasicType()) { - if (isPtr || isSharedVar) - return pyxClassName() + ".cyCreateFromShared(" + var + ")"; - else { - // construct a shared_ptr if var is not a shared ptr - return pyxClassName() + ".cyCreateFromShared(" + make_shared_pxd_class_in_pyx() + - + "(" + var + "))"; - } - } else - return var; -} - -/* ************************************************************************* */ diff --git a/wrap/ReturnType.h b/wrap/ReturnType.h deleted file mode 100644 index 8d78bb48f..000000000 --- a/wrap/ReturnType.h +++ 
/dev/null @@ -1,89 +0,0 @@ -/** - * @file ReturnValue.h - * @brief Encapsulates a return type of a method - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "Qualified.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" -#include - -#pragma once - -namespace wrap { - -/** - * Encapsulates return value of a method or function - */ -struct ReturnType : public Qualified { - bool isPtr; - - friend struct ReturnValueGrammar; - - /// Makes a void type - ReturnType() : isPtr(false) {} - - /// Constructor, no namespaces - ReturnType(const std::string& name, Category c = CLASS, bool ptr = false) - : Qualified(name, c), isPtr(ptr) {} - - void clear() override { - Qualified::clear(); - isPtr = false; - } - - /// Check if this type is in a set of valid types - template - void verify(TYPES validtypes, const std::string& s) const { - std::string key = qualifiedName("::"); - if (find(validtypes.begin(), validtypes.end(), key) == validtypes.end()) - throw DependencyMissing(key, "checking return type of " + s); - } - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - - std::string pyx_returnType(bool addShared = true) const; - std::string pyx_casting(const std::string& var, - bool isSharedVar = true) const; - -private: - friend struct ReturnValue; - - /// Example: out[1] = wrap_shared_ptr(pairResult.second,"Test", false); - void wrap_result(const std::string& out, const std::string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnTypeGrammar : public classic::grammar { - wrap::ReturnType& result_; ///< successful parse will be placed in here - - TypeGrammar type_g; - - /// 
Construct ReturnType grammar and specify where result is placed - ReturnTypeGrammar(wrap::ReturnType& result) - : result_(result), type_g(result_) {} - - /// Definition of type grammar - template - struct definition { - classic::rule type_p; - - definition(ReturnTypeGrammar const& self) { - using namespace classic; - type_p = self.type_g >> !ch_p('*')[assign_a(self.result_.isPtr, T)]; - } - - classic::rule const& start() const { return type_p; } - }; -}; -// ReturnTypeGrammar - -} // \namespace wrap diff --git a/wrap/ReturnValue.cpp b/wrap/ReturnValue.cpp deleted file mode 100644 index e58e85602..000000000 --- a/wrap/ReturnValue.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/** - * @file ReturnValue.cpp - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - */ - -#include "ReturnValue.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -ReturnValue ReturnValue::expandTemplate(const TemplateSubstitution& ts) const { - ReturnValue instRetVal = *this; - instRetVal.type1 = ts.tryToSubstitite(type1); - if (isPair) instRetVal.type2 = ts.tryToSubstitite(type2); - return instRetVal; -} - -/* ************************************************************************* */ -string ReturnValue::returnType() const { - if (isPair) - return "pair< " + type1.qualifiedName("::") + ", " + - type2.qualifiedName("::") + " >"; - else - return type1.qualifiedName("::"); -} - -/* ************************************************************************* */ -string ReturnValue::matlab_returnType() const { - return isPair ? 
"[first,second]" : "result"; -} - -/* ************************************************************************* */ -void ReturnValue::wrap_result(const string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - if (isPair) { - // For a pair, store the returned pair so we do not evaluate the function - // twice - wrapperFile.oss << " auto pairResult = " << result - << ";\n"; - type1.wrap_result(" out[0]", "pairResult.first", wrapperFile, - typeAttributes); - type2.wrap_result(" out[1]", "pairResult.second", wrapperFile, - typeAttributes); - } else { // Not a pair - type1.wrap_result(" out[0]", result, wrapperFile, typeAttributes); - } -} - -/* ************************************************************************* */ -void ReturnValue::emit_matlab(FileWriter& proxyFile) const { - string output; - if (isPair) - proxyFile.oss << "[ varargout{1} varargout{2} ] = "; - else if (type1.category != ReturnType::VOID) - proxyFile.oss << "varargout{1} = "; -} - -/* ************************************************************************* */ -void ReturnValue::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - if (isPair) { - file.oss << "pair["; - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << ","; - type2.emit_cython_pxd(file, className, templateArgs); - file.oss << "] "; - } else { - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << " "; - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_returnType() const { - if (isVoid()) return ""; - if (isPair) { - return "pair [" + type1.pyx_returnType(false) + "," + - type2.pyx_returnType(false) + "]"; - } else { - return type1.pyx_returnType(true); - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_casting(const std::string& var) const { - if (isVoid()) return 
""; - if (isPair) { - return "(" + type1.pyx_casting(var + ".first", false) + "," + - type2.pyx_casting(var + ".second", false) + ")"; - } else { - return type1.pyx_casting(var); - } -} - -/* ************************************************************************* */ diff --git a/wrap/ReturnValue.h b/wrap/ReturnValue.h deleted file mode 100644 index 721132797..000000000 --- a/wrap/ReturnValue.h +++ /dev/null @@ -1,126 +0,0 @@ -/** - * @file ReturnValue.h - * - * @brief Encapsulates a return value from a method - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Richard Roberts - */ - -#include "ReturnType.h" -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#pragma once - -namespace wrap { - -/** - * Encapsulates return type of a method or function, possibly a pair - */ -struct ReturnValue { - - bool isPair; - ReturnType type1, type2; - - friend struct ReturnValueGrammar; - - /// Default constructor - ReturnValue() : - isPair(false) { - } - - /// Construct from type - ReturnValue(const ReturnType& type) : - isPair(false), type1(type) { - } - - /// Construct from pair type arguments - ReturnValue(const ReturnType& t1, const ReturnType& t2) : - isPair(true), type1(t1), type2(t2) { - } - - /// Destructor - virtual ~ReturnValue() {} - - virtual void clear() { - type1.clear(); - type2.clear(); - isPair = false; - } - - bool isVoid() const { - return !isPair && !type1.isPtr && (type1.name() == "void"); - } - - bool operator==(const ReturnValue& other) const { - return isPair == other.isPair && type1 == other.type1 - && type2 == other.type2; - } - - /// Substitute template argument - ReturnValue expandTemplate(const TemplateSubstitution& ts) const; - - std::string returnType() const; - - std::string matlab_returnType() const; - - void wrap_result(const std::string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; - - void emit_matlab(FileWriter& proxyFile) 
const; - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - std::string pyx_returnType() const; - std::string pyx_casting(const std::string& var) const; - - friend std::ostream& operator<<(std::ostream& os, const ReturnValue& r) { - if (!r.isPair && r.type1.category == ReturnType::VOID) - os << "void"; - else - os << r.returnType(); - return os; - } - -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnValueGrammar: public classic::grammar { - - wrap::ReturnValue& result_; ///< successful parse will be placed in here - ReturnTypeGrammar returnType1_g, returnType2_g; ///< Type parsers - - /// Construct type grammar and specify where result is placed - ReturnValueGrammar(wrap::ReturnValue& result) : - result_(result), returnType1_g(result.type1), returnType2_g(result.type2) { - } - - /// Definition of type grammar - template - struct definition { - - classic::rule pair_p, returnValue_p; - - definition(ReturnValueGrammar const& self) { - using namespace classic; - - pair_p = (str_p("pair") >> '<' >> self.returnType1_g >> ',' - >> self.returnType2_g >> '>')[assign_a(self.result_.isPair, T)]; - - returnValue_p = pair_p | self.returnType1_g; - } - - classic::rule const& start() const { - return returnValue_p; - } - - }; -}; -// ReturnValueGrammar - -}// \namespace wrap diff --git a/wrap/StaticMethod.cpp b/wrap/StaticMethod.cpp deleted file mode 100644 index 0f812ea61..000000000 --- a/wrap/StaticMethod.cpp +++ /dev/null @@ -1,151 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file StaticMethod.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "StaticMethod.h" -#include "utilities.h" -#include "Class.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void StaticMethod::proxy_header(FileWriter& proxyFile) const { - string upperName = matlabName(); - upperName[0] = toupper(upperName[0], locale()); - proxyFile.oss << " function varargout = " << upperName << "(varargin)\n"; -} - -/* ************************************************************************* */ -string StaticMethod::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const { - // check arguments - // NOTE: for static functions, there is no object passed - wrapperFile.oss << " checkArguments(\"" << matlabUniqueName << "." 
<< name_ - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(wrapperFile, 0); // We start at 0 because there is no self object - - // call method and wrap result - // example: out[0]=wrap(staticMethod(t)); - string expanded = cppClassName + "::" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for(size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " @staticmethod\n"; - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << name_ + ((i>0)?"_" + to_string(i):"") << " \"" << name_ << "\"" << "("; - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ") except +\n"; - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_wrapper_pxd(FileWriter& file, - const Class& cls) const { - if (nrOverloads() > 1) { - for (size_t i = 0; i < nrOverloads(); ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs)\n"; - } - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - assert(nrOverloads() == 1); - file.oss << " @staticmethod\n"; - file.oss << " def " << name_ << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), name_, 0); - file.oss << " "; - if (!returnVals_[0].isVoid()) { - file.oss << 
"return " << returnVals_[0].pyx_casting(call) << "\n"; - } else - file.oss << call << "\n"; - file.oss << "\n"; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx(FileWriter& file, const Class& cls) const { - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads.. - file.oss << " @staticmethod # overloaded\n"; - file.oss << " def " << name_ << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " success, results = " << cls.pyxClassName() << "." - << funcName << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n\n"; - - // Create cdef methods for all overloaded methods - for(size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - ArgumentList args = argumentList(i); - file.oss << pyx_resolveOverloadParams(args, false, 3); - - /// Call cython corresponding function and return - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string pxdFuncName = name_ + ((i>0)?"_" + to_string(i):""); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), pxdFuncName, i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - } -} - -/* 
************************************************************************* */ diff --git a/wrap/StaticMethod.h b/wrap/StaticMethod.h deleted file mode 100644 index dbb918596..000000000 --- a/wrap/StaticMethod.h +++ /dev/null @@ -1,51 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file StaticMethod.h - * @brief describes and generates code for static methods - * @author Frank Dellaert - * @author Alex Cunningham - * @author Richard Roberts - **/ - -#pragma once - -#include "MethodBase.h" - -namespace wrap { - -/// StaticMethod class -struct StaticMethod: public MethodBase { - - typedef const std::string& Str; - - friend std::ostream& operator<<(std::ostream& os, const StaticMethod& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << "static " << m.returnVals_[i] << " " << m.name_ << m.argLists_[i]; - return os; - } - - void emit_cython_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_wrapper_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_pyx(FileWriter& file, const Class& cls) const; - void emit_cython_pyx_no_overload(FileWriter& file, const Class& cls) const; - -protected: - - void proxy_header(FileWriter& proxyFile) const override; - - std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const override; -}; - -} // \namespace wrap - diff --git a/wrap/Template.h b/wrap/Template.h deleted file mode 100644 index 32f8e9761..000000000 --- a/wrap/Template.h +++ /dev/null @@ -1,146 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 
2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Template.h - * @brief Template name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include - -namespace wrap { - -/// The template specification that goes before a method or a class -class Template { - std::string argName_; - std::vector argValues_; - std::vector intList_; - friend struct TemplateGrammar; -public: - /// The only way to get values into a Template is via our friendly Grammar - Template() { - } - void clear() { - argName_.clear(); - argValues_.clear(); - intList_.clear(); - } - const std::string& argName() const { - return argName_; - } - const std::vector& intList() const { - return intList_; - } - const std::vector& argValues() const { - return argValues_; - } - bool empty() const { - return argValues_.empty() && intList_.empty(); - } - size_t nrValues() const { - return argValues_.size(); - } - const Qualified& operator[](size_t i) const { - return argValues_[i]; - } - bool valid() const { - return !argName_.empty() && argValues_.size() > 0; - } - -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct IntListGrammar: public classic::grammar { - - typedef std::vector IntList; - IntList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - IntListGrammar(IntList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - classic::rule integer_p, intList_p; - - definition(IntListGrammar const& self) { - using namespace classic; - - integer_p = int_p[push_back_a(self.result_)]; - - 
intList_p = '{' >> !integer_p >> *(',' >> integer_p) >> '}'; - } - - classic::rule const& start() const { - return intList_p; - } - - }; -}; -// IntListGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TemplateGrammar: public classic::grammar { - - Template& result_; ///< successful parse will be placed in here - TypeListGrammar<'{', '}'> argValues_g; ///< TypeList parser - IntListGrammar intList_g; ///< TypeList parser - - /// Construct type grammar and specify where result is placed - TemplateGrammar(Template& result) : - result_(result), argValues_g(result.argValues_), // - intList_g(result.intList_) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - classic::rule templateArgValues_p; - - definition(TemplateGrammar const& self) { - using classic::str_p; - using classic::assign_a; - templateArgValues_p = (str_p("template") >> '<' - >> (BasicRules::name_p)[assign_a(self.result_.argName_)] - >> '=' >> (self.argValues_g | self.intList_g) >> '>'); - } - - classic::rule const& start() const { - return templateArgValues_p; - } - - }; -}; -// TemplateGrammar - -/// Cool initializer for tests -static inline boost::optional