diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh new file mode 100644 index 000000000..5cc6ada24 --- /dev/null +++ b/.github/scripts/python.sh @@ -0,0 +1,94 @@ +#!/bin/bash + +########################################################## +# Build and test the GTSAM Python wrapper. +########################################################## + +set -x -e + +if [ -z ${PYTHON_VERSION+x} ]; then + echo "Please provide the Python version to build against!" + exit 127 +fi + +if [ -z ${WRAPPER+x} ]; then + echo "Please provide the wrapper to build!" + exit 126 +fi + +PYTHON="python${PYTHON_VERSION}" + +if [[ $(uname) == "Darwin" ]]; then + brew install wget +else + # Install a system package required by our library + sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools +fi + +PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin + +case $WRAPPER in +"cython") + BUILD_CYTHON="ON" + BUILD_PYBIND="OFF" + TYPEDEF_POINTS_TO_VECTORS="OFF" + + sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/cython/requirements.txt + ;; +"pybind") + BUILD_CYTHON="OFF" + BUILD_PYBIND="ON" + TYPEDEF_POINTS_TO_VECTORS="ON" + + sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt + ;; +*) + exit 126 + ;; +esac + +git submodule update --init --recursive + +mkdir $GITHUB_WORKSPACE/build +cd $GITHUB_WORKSPACE/build + +cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \ + -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \ + -DGTSAM_USE_QUATERNIONS=OFF \ + -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ + -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ + -DGTSAM_INSTALL_CYTHON_TOOLBOX=${BUILD_CYTHON} \ + -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \ + -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \ + -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ + -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \ + -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install + +make -j$(nproc) install & + +while ps -p $! > /dev/null +do + sleep 60 + now=$(date +%s) + printf "%d seconds have elapsed\n" $(( (now - start) )) +done + +case $WRAPPER in +"cython") + cd $GITHUB_WORKSPACE/build/cython + $PYTHON setup.py install --user --prefix= + cd $GITHUB_WORKSPACE/build/cython/gtsam/tests + $PYTHON -m unittest discover + ;; +"pybind") + cd $GITHUB_WORKSPACE/python + $PYTHON setup.py install --user --prefix= + cd $GITHUB_WORKSPACE/wrap/python/gtsam_py/tests + $PYTHON -m unittest discover + ;; +*) + echo "THIS SHOULD NEVER HAPPEN!" + exit 125 + ;; +esac \ No newline at end of file diff --git a/.travis.sh b/.github/scripts/unix.sh old mode 100755 new mode 100644 similarity index 72% rename from .travis.sh rename to .github/scripts/unix.sh index 535a72f4b..aa6e49650 --- a/.travis.sh +++ b/.github/scripts/unix.sh @@ -1,20 +1,25 @@ #!/bin/bash +########################################################## +# Build and test GTSAM for *nix based systems. +# Specifically Linux and macOS. 
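#
# A sketch of the environment this script assumes, based on the variables
# referenced below and in the workflow files:
#   GITHUB_WORKSPACE  - root of the checkout (used for SOURCE_DIR and BUILD_DIR)
#   GTSAM_WITH_TBB    - set to ON to install TBB before configuring
#   GCC_VERSION       - if set, exports CC=gcc-$GCC_VERSION and CXX=g++-$GCC_VERSION
# The CI workflows invoke it as: bash .github/scripts/unix.sh -t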
+########################################################## + # install TBB with _debug.so files function install_tbb() { TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download - TBB_VERSION=4.4.2 - TBB_DIR=tbb44_20151115oss + TBB_VERSION=4.4.5 + TBB_DIR=tbb44_20160526oss TBB_SAVEPATH="/tmp/tbb.tgz" - if [ "$TRAVIS_OS_NAME" == "linux" ]; then + if [ "$(uname)" == "Linux" ]; then OS_SHORT="lin" TBB_LIB_DIR="intel64/gcc4.4" SUDO="sudo" - elif [ "$TRAVIS_OS_NAME" == "osx" ]; then - OS_SHORT="lin" + elif [ "$(uname)" == "Darwin" ]; then + OS_SHORT="osx" TBB_LIB_DIR="" SUDO="" @@ -25,7 +30,7 @@ function install_tbb() TBBROOT=/tmp/$TBB_DIR # Copy the needed files to the correct places. - # This works correctly for travis builds, instead of setting path variables. + # This works correctly for CI builds, instead of setting path variables. # This is what Homebrew does to install TBB on Macs $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ $SUDO cp -R $TBBROOT/include/ /usr/local/include/ @@ -38,15 +43,15 @@ function configure() set -e # Make sure any error makes the script to return an error code set -x # echo - SOURCE_DIR=`pwd` - BUILD_DIR=build + SOURCE_DIR=$GITHUB_WORKSPACE + BUILD_DIR=$GITHUB_WORKSPACE/build #env - git clean -fd || true + git submodule update --init --recursive rm -fr $BUILD_DIR || true mkdir $BUILD_DIR && cd $BUILD_DIR - install_tbb + [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb if [ ! -z "$GCC_VERSION" ]; then export CC=gcc-$GCC_VERSION @@ -61,9 +66,12 @@ function configure() -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=OFF + -DCMAKE_VERBOSE_MAKEFILE=ON \ + -DBOOST_ROOT=$BOOST_ROOT \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_ARCHITECTURE=-x64 } @@ -71,7 +79,7 @@ function configure() function finish () { # Print ccache stats - ccache -s + [ -x "$(command -v ccache)" ] && ccache -s cd $SOURCE_DIR } @@ -111,4 +119,4 @@ case $1 in -t) test ;; -esac +esac \ No newline at end of file diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml new file mode 100644 index 000000000..911bec59c --- /dev/null +++ b/.github/workflows/build-linux.yml @@ -0,0 +1,78 @@ +name: Linux CI + +on: [push, pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
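      # Each `name` below is crossed with every `build_type`/`build_unstable`
      # value, and the matching `include` entry attaches the concrete
      # os/compiler/version for that row.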
+ name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy + + echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)" + echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)" + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + - name: Check Boost version + if: runner.os == 'Linux' + run: | + echo "BOOST_ROOT = $BOOST_ROOT" + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + bash .github/scripts/unix.sh -t \ No newline at end of file diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml new file mode 100644 index 000000000..55d9071ef --- /dev/null +++ b/.github/workflows/build-macos.yml @@ -0,0 +1,51 @@ +name: macOS CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + macOS-10.15-xcode-11.3.1, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + bash .github/scripts/unix.sh -t \ No newline at end of file diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml new file mode 100644 index 000000000..0b4a7f12f --- /dev/null +++ b/.github/workflows/build-python.yml @@ -0,0 +1,95 @@ +name: Python CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + PYTHON_VERSION: ${{ matrix.python_version }} + WRAPPER: ${{ matrix.wrapper }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + macOS-10.15-xcode-11.3.1, + ] + + build_type: [Debug, Release] + python_version: [3] + wrapper: [cython] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. 
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + bash .github/scripts/python.sh + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + bash .github/scripts/python.sh \ No newline at end of file diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml new file mode 100644 index 000000000..b3150a751 --- /dev/null +++ b/.github/workflows/build-windows.yml @@ -0,0 +1,75 @@ +name: Windows CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + windows-2016-cl, + windows-2019-cl, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: windows-2016-cl + os: windows-2016 + compiler: cl + + - name: windows-2019-cl + os: windows-2019 + compiler: cl + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Windows) + if: runner.os == 'Windows' + shell: powershell + run: | + Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh') + scoop install ninja --global + if ("${{ matrix.compiler }}".StartsWith("clang")) { + scoop install llvm --global + } + if ("${{ matrix.compiler }}" -eq "gcc") { + # Chocolatey GCC is broken on the windows-2019 image. + # See: https://github.com/DaanDeMeyer/doctest/runs/231595515 + # See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413 + scoop install gcc --global + echo "::set-env name=CC::gcc" + echo "::set-env name=CXX::g++" + } elseif ("${{ matrix.compiler }}" -eq "clang") { + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + } else { + echo "::set-env name=CC::${{ matrix.compiler }}" + echo "::set-env name=CXX::${{ matrix.compiler }}" + } + # Scoop modifies the PATH so we make the modified PATH global. 
+ echo "::set-env name=PATH::$env:PATH" + - name: Build (Windows) + if: runner.os == 'Windows' + run: | + cmake -E remove_directory build + echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}" + cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib" + cmake --build build --config ${{ matrix.build_type }} --target gtsam + cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable + cmake --build build --config ${{ matrix.build_type }} --target wrap + cmake --build build --config ${{ matrix.build_type }} --target check.base + cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable + cmake --build build --config ${{ matrix.build_type }} --target check.linear \ No newline at end of file diff --git a/.github/workflows/trigger-python.yml b/.github/workflows/trigger-python.yml index 8fad9e7ca..94527e732 100644 --- a/.github/workflows/trigger-python.yml +++ b/.github/workflows/trigger-python.yml @@ -1,6 +1,9 @@ # This triggers Cython builds on `gtsam-manylinux-build` name: Trigger Python Builds -on: push +on: + push: + branches: + - develop jobs: triggerCython: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 1d89cac25..c2d6ce60f 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ cython/gtsam_wrapper.pxd /CMakeSettings.json # for QtCreator: CMakeLists.txt.user* +xcode/ diff --git a/.travis.python.sh b/.travis.python.sh deleted file mode 100644 index 772311f38..000000000 --- a/.travis.python.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -set -x -e - -if [ -z ${PYTHON_VERSION+x} ]; then - echo "Please provide the Python version to build against!" 
- exit 127 -fi - -PYTHON="python${PYTHON_VERSION}" - -if [[ $(uname) == "Darwin" ]]; then - brew install wget -else - # Install a system package required by our library - sudo apt-get install wget libicu-dev python3-pip python3-setuptools -fi - -CURRDIR=$(pwd) - -sudo $PYTHON -m pip install -r ./cython/requirements.txt - -mkdir $CURRDIR/build -cd $CURRDIR/build - -cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \ - -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \ - -DGTSAM_USE_QUATERNIONS=OFF \ - -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ - -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \ - -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF \ - -DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install - -make -j$(nproc) install - -cd cython - -sudo $PYTHON setup.py install - -cd $CURRDIR/cython/gtsam/tests - -$PYTHON -m unittest discover \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d8094ef4d..000000000 --- a/.travis.yml +++ /dev/null @@ -1,141 +0,0 @@ -language: cpp -cache: ccache -sudo: required -dist: xenial - -addons: - apt: - sources: - - ubuntu-toolchain-r-test - - sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main' - key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - packages: - - g++-9 - - clang-9 - - build-essential pkg-config - - cmake - - python3-dev libpython-dev - - python3-numpy - - libboost-all-dev - -# before_install: - # - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi - -install: - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi - -# We first do the compile stage specified below, then the matrix expansion specified after. -stages: - - compile - - test - - special - -env: - global: - - MAKEFLAGS="-j2" - - CCACHE_SLOPPINESS=pch_defines,time_macros - -# Compile stage without building examples/tests to populate the caches. 
-jobs: -# -------- STAGE 1: COMPILE ----------- - include: -# on Mac, GCC - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Mac, CLANG - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, GCC - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, CLANG - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, with deprecated ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON - script: bash .travis.sh -b -# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests - - stage: special - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON - script: bash .travis.sh -t -# -------- STAGE 2: TESTS ----------- -# on Mac, GCC - - stage: test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t -# on Linux, with quaternions ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON - script: bash .travis.sh -t - - stage: special - os: linux - compiler: gcc - env: PYTHON_VERSION=3 - script: bash .travis.python.sh - - stage: special - os: osx - compiler: clang - env: PYTHON_VERSION=3 - script: bash .travis.python.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index edefbf2ea..d2b9bc75e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -75,7 +75,7 @@ option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TB option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) -option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON) +option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" 
OFF) option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) @@ -509,104 +509,104 @@ set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>= # Print configuration variables message(STATUS "===============================================================") message(STATUS "================ Configuration Options ======================") -message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}") -message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}") -message(STATUS " CMake version : ${CMAKE_VERSION}") -message(STATUS " CMake generator : ${CMAKE_GENERATOR}") -message(STATUS " CMake build tool : ${CMAKE_BUILD_TOOL}") +print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}") +print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}") +print_config("CMake version" "${CMAKE_VERSION}") +print_config("CMake generator" "${CMAKE_GENERATOR}") +print_config("CMake build tool" "${CMAKE_BUILD_TOOL}") message(STATUS "Build flags ") -print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ") -print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") +print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests") +print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'") +print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") if (DOXYGEN_FOUND) - print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ") + print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs") endif() -print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ") -print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ") +print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries") +print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name") if(GTSAM_UNSTABLE_AVAILABLE) - print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") + print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable") endif() if(NOT MSVC AND NOT XCODE_VERSION) - print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") - message(STATUS " Build type : ${CMAKE_BUILD_TYPE}") - message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") - message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") + print_config("Build type" "${CMAKE_BUILD_TYPE}") + print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") endif() print_build_options_for_target(gtsam) -message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") +print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") if(GTSAM_USE_TBB) - message(STATUS " Use Intel TBB : Yes") + print_config("Use Intel TBB" "Yes") elseif(TBB_FOUND) - message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled") + print_config("Use Intel TBB" "TBB found but GTSAM_WITH_TBB is disabled") else() - message(STATUS " Use Intel TBB : TBB not 
found") + print_config("Use Intel TBB" "TBB not found") endif() if(GTSAM_USE_EIGEN_MKL) - message(STATUS " Eigen will use MKL : Yes") + print_config("Eigen will use MKL" "Yes") elseif(MKL_FOUND) - message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled") + print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled") else() - message(STATUS " Eigen will use MKL : MKL not found") + print_config("Eigen will use MKL" "MKL not found") endif() if(GTSAM_USE_EIGEN_MKL_OPENMP) - message(STATUS " Eigen will use MKL and OpenMP : Yes") + print_config("Eigen will use MKL and OpenMP" "Yes") elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") elseif(OPENMP_FOUND AND NOT MKL_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found") elseif(OPENMP_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") else() - message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found") + print_config("Eigen will use MKL and OpenMP" "OpenMP not found") endif() -message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}") +print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}") if(GTSAM_THROW_CHEIRALITY_EXCEPTION) - message(STATUS " Cheirality exceptions enabled : YES") + print_config("Cheirality exceptions enabled" "YES") else() - message(STATUS " Cheirality exceptions enabled : NO") + print_config("Cheirality exceptions enabled" "NO") endif() if(NOT MSVC AND NOT XCODE_VERSION) if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) - message(STATUS " Build with ccache : Yes") + print_config("Build with ccache" "Yes") elseif(CCACHE_FOUND) - message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") + print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") else() - message(STATUS " Build with ccache : No") + print_config("Build with ccache" "No") endif() endif() -message(STATUS "Packaging flags ") -message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}") -message(STATUS " CPack Generator : ${CPACK_GENERATOR}") +message(STATUS "Packaging flags") +print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}") +print_config("CPack Generator" "${CPACK_GENERATOR}") message(STATUS "GTSAM flags ") -print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") -print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") -print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") -print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") -print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 allowed ") -print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ") -print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") -print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") -print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ") +print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") 
+print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") +print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") +print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") +print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") +print_enabled_config(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ") +print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") +print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") +print_enabled_config(${GTSAM_BUILD_WRAP} "Build Wrap ") -message(STATUS "MATLAB toolbox flags ") -print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") +message(STATUS "MATLAB toolbox flags") +print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) - message(STATUS " MATLAB root : ${MATLAB_ROOT}") - message(STATUS " MEX binary : ${MEX_COMMAND}") + print_config("MATLAB root" "${MATLAB_ROOT}") + print_config("MEX binary" "${MEX_COMMAND}") endif() message(STATUS "Cython toolbox flags ") -print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ") +print_enabled_config(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ") if(GTSAM_INSTALL_CYTHON_TOOLBOX) - message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}") + print_config("Python version" "${GTSAM_PYTHON_VERSION}") endif() message(STATUS "===============================================================") diff --git a/LICENSE b/LICENSE index d828deb55..228a6b942 100644 --- a/LICENSE +++ b/LICENSE @@ -23,3 +23,5 @@ ordering library - Included unmodified in gtsam/3rdparty/metis - Licenced under Apache License v 2.0, provided in gtsam/3rdparty/metis/LICENSE.txt +- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK. + - Licenced under MPL2, provided at https://github.com/yixuan/spectra diff --git a/README.md b/README.md index 093e35f0f..015d65e3d 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # README - Georgia Tech Smoothing and Mapping Library +**As of August 1, develop is officially in "Pre 4.1" mode, and features deprecated in 4.0 were removed. Use the last 4.0.3 release if you need those features. However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag GTSAM_ALLOW_DEPRECATED_SINCE_V4** + ## What is GTSAM? GTSAM is a C++ library that implements smoothing and @@ -44,9 +46,12 @@ Optional prerequisites - used automatically if findable by CMake: ## GTSAM 4 Compatibility -GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. We also deprecate some legacy functionality and wrongly named methods, but by default the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 is enabled, allowing anyone to just pull V4 and compile. To build the python toolbox, however, you will have to explicitly disable that flag. +GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect. 
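For example (an illustrative snippet only; the same change appears in the Python utilities updated in this repository), pass the coordinates explicitly instead of relying on the deprecated zero-initializing default constructor:

```python
import gtsam

# Deprecated: p = gtsam.Point3()  (relied on zero-initialization)
p = gtsam.Point3(0, 0, 0)  # explicit coordinates
t = gtsam.Point2(0, 0)
```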
+ +GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 to use the deprecated methods. + +GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag GTSAM_ALLOW_DEPRECATED_SINCE_V41 for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile. -Also, GTSAM 4 introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect. ## Wrappers diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 3747354cf..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,33 +0,0 @@ -# version format -version: 4.0.3-{branch}-build{build} - -os: Visual Studio 2019 - -clone_folder: c:\projects\gtsam - -platform: x64 -configuration: Release - -environment: - CTEST_OUTPUT_ON_FAILURE: 1 - BOOST_ROOT: C:/Libraries/boost_1_71_0 - -build_script: - - cd c:\projects\gtsam\build - # As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just - # check that parts of GTSAM build correctly: - #- cmake --build . - - cmake --build . --config Release --target gtsam - - cmake --build . --config Release --target gtsam_unstable - - cmake --build . --config Release --target wrap - #- cmake --build . --target check - - cmake --build . --config Release --target check.base - - cmake --build . --config Release --target check.base_unstable - - cmake --build . --config Release --target check.linear - -before_build: - - cd c:\projects\gtsam - - mkdir build - - cd build - # Disable examples to avoid AppVeyor timeout - - cmake -G "Visual Studio 16 2019" .. 
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake index bffe97904..53dacd3f5 100644 --- a/cmake/GtsamBuildTypes.cmake +++ b/cmake/GtsamBuildTypes.cmake @@ -104,7 +104,14 @@ if(MSVC) set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING /MD /O2 CACHE STRING "(User editable) Private compiler flags for Timing configuration.") else() # Common to all configurations, next for each configuration: - + + if ( + ((CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0.0)) OR + (CMAKE_CXX_COMPILER_ID MATCHES "GNU") + ) + set(flag_override_ -Wsuggest-override) # -Werror=suggest-override: Add again someday + endif() + set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall # Enable common warnings -fPIC # ensure proper code generation for shared libraries @@ -112,7 +119,7 @@ else() $<$:-Wreturn-stack-address -Werror=return-stack-address> # Error: return local address -Wreturn-type -Werror=return-type # Error on missing return() -Wformat -Werror=format-security # Error on wrong printf() arguments - $<$:-Wsuggest-override -Werror=suggest-override> # Enforce the use of the override keyword + $<$:${flag_override_}> # Enforce the use of the override keyword # CACHE STRING "(User editable) Private compiler flags for all configurations.") set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG -g -fno-inline CACHE STRING "(User editable) Private compiler flags for Debug configuration.") diff --git a/cmake/GtsamPrinting.cmake b/cmake/GtsamPrinting.cmake index e53f9c54f..5757f5ee1 100644 --- a/cmake/GtsamPrinting.cmake +++ b/cmake/GtsamPrinting.cmake @@ -1,14 +1,3 @@ -# print configuration variables -# Usage: -#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -function(print_config_flag flag msg) - if (flag) - message(STATUS " ${msg}: Enabled") - else () - message(STATUS " ${msg}: Disabled") - endif () -endfunction() - # Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake function(string_pad RESULT_NAME DESIRED_LENGTH VALUE) string(LENGTH "${VALUE}" VALUE_LENGTH) @@ -26,6 +15,27 @@ endfunction() set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.") mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH) +# print configuration variables with automatic padding +# Usage: +# print_config(${GTSAM_BUILD_TESTS} "Build Tests") +function(print_config config msg) + string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}") + message(STATUS "${padded_config}: ${msg}") +endfunction() + +# print configuration variable with enabled/disabled value +# Usage: +# print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests ") +function(print_enabled_config config msg) + string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}") + if (config) + message(STATUS "${padded_msg}: Enabled") + else () + message(STATUS "${padded_msg}: Disabled") + endif () +endfunction() + + # Print " var: ${var}" padding with spaces as needed function(print_padded variable_name) string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${variable_name}") diff --git a/cython/gtsam/tests/test_FrobeniusFactor.py b/cython/gtsam/tests/test_FrobeniusFactor.py index f3f5354bb..e808627f5 100644 --- a/cython/gtsam/tests/test_FrobeniusFactor.py +++ b/cython/gtsam/tests/test_FrobeniusFactor.py @@ -13,7 +13,7 @@ import unittest import numpy as np from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4, - FrobeniusWormholeFactor, SOn) + ShonanFactor3, SOn) id = SO4() 
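# v1 and v2 are 6-dimensional tangent vectors (so(4) has dimension 6); the tests
# below build test rotations by retracting them from the identity, e.g. SOn(4).retract(v1).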
v1 = np.array([0, 0, 0, 0.1, 0, 0]) @@ -43,7 +43,7 @@ class TestFrobeniusFactorSO4(unittest.TestCase): """Test creation of a factor that calculates Shonan error.""" R1 = SO3.Expmap(v1[3:]) R2 = SO3.Expmap(v2[3:]) - factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4) + factor = ShonanFactor3(1, 2, Rot3(R1.between(R2).matrix()), p=4) I4 = SOn(4) Q1 = I4.retract(v1) Q2 = I4.retract(v2) diff --git a/cython/gtsam/utils/plot.py b/cython/gtsam/utils/plot.py index 1e976a69e..77031eec4 100644 --- a/cython/gtsam/utils/plot.py +++ b/cython/gtsam/utils/plot.py @@ -155,7 +155,6 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, axes.set_xlabel(axis_labels[0]) axes.set_ylabel(axis_labels[1]) - axes.set_zlabel(axis_labels[2]) return fig diff --git a/cython/gtsam/utils/visual_data_generator.py b/cython/gtsam/utils/visual_data_generator.py index f04588e70..5ce72fe68 100644 --- a/cython/gtsam/utils/visual_data_generator.py +++ b/cython/gtsam/utils/visual_data_generator.py @@ -30,8 +30,8 @@ class GroundTruth: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.cameras = [Pose3()] * nrCameras - self.points = [Point3()] * nrPoints + self.cameras = [gtsam.Pose3()] * nrCameras + self.points = [gtsam.Point3(0, 0, 0)] * nrPoints def print_(self, s=""): print(s) @@ -99,11 +99,11 @@ def generate_data(options): r = 40 for i in range(options.nrCameras): theta = i * 2 * np.pi / options.nrCameras - t = Point3(r * np.cos(theta), r * np.sin(theta), height) - truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, - Point3(), - Point3(0, 0, 1), - truth.K) + t = gtsam.Point3(r * np.cos(theta), r * np.sin(theta), height) + truth.cameras[i] = gtsam.SimpleCamera.Lookat(t, + gtsam.Point3(0, 0, 0), + gtsam.Point3(0, 0, 1), + truth.K) # Create measurements for j in range(nrPoints): # All landmarks seen in every frame diff --git a/examples/Data/Klaus3.g2o b/examples/Data/Klaus3.g2o index 4c7233fa7..83a6e6fd2 100644 --- a/examples/Data/Klaus3.g2o +++ b/examples/Data/Klaus3.g2o @@ -1,6 +1,6 @@ -VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109 -VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809 -VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403 -EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063 +VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 
-0.03232420352077356 -0.004386230477751116 0.998239108728862 +VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738 +EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 diff --git a/examples/Data/example_with_vertices.g2o b/examples/Data/example_with_vertices.g2o index 6c2f1a530..ca7cd86df 100644 --- a/examples/Data/example_with_vertices.g2o +++ b/examples/Data/example_with_vertices.g2o @@ -1,16 +1,16 @@ -VERTEX_SE3:QUAT 8646911284551352320 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 -VERTEX_SE3:QUAT 8646911284551352321 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 -VERTEX_SE3:QUAT 8646911284551352322 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 -VERTEX_SE3:QUAT 8646911284551352323 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 -VERTEX_SE3:QUAT 8646911284551352324 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162 -VERTEX_SE3:QUAT 8646911284551352325 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 -VERTEX_SE3:QUAT 8646911284551352326 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 -VERTEX_SE3:QUAT 8646911284551352327 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 -VERTEX_TRACKXYZ 7782220156096217088 10 10 10 -VERTEX_TRACKXYZ 7782220156096217089 -10 10 10 -VERTEX_TRACKXYZ 7782220156096217090 -10 -10 10 -VERTEX_TRACKXYZ 7782220156096217091 10 -10 10 -VERTEX_TRACKXYZ 7782220156096217092 10 10 -10 -VERTEX_TRACKXYZ 7782220156096217093 -10 10 -10 -VERTEX_TRACKXYZ 7782220156096217094 -10 -10 -10 -VERTEX_TRACKXYZ 7782220156096217095 10 -10 -10 +VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 +VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 +VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 +VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 +VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162 +VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 +VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 +VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 +VERTEX_TRACKXYZ 0 10 10 10 +VERTEX_TRACKXYZ 1 -10 10 10 +VERTEX_TRACKXYZ 2 -10 -10 10 +VERTEX_TRACKXYZ 3 10 -10 10 +VERTEX_TRACKXYZ 4 10 10 -10 +VERTEX_TRACKXYZ 5 -10 10 -10 +VERTEX_TRACKXYZ 6 -10 -10 -10 +VERTEX_TRACKXYZ 7 10 -10 -10 diff --git a/gtsam.h b/gtsam.h index 2cd30be42..c5d25b0e9 100644 --- a/gtsam.h +++ b/gtsam.h @@ -2369,6 +2369,7 @@ virtual class NonlinearOptimizer { double error() const; int iterations() const; gtsam::Values values() const; + gtsam::NonlinearFactorGraph graph() const; gtsam::GaussianFactorGraph* iterate() const; }; @@ 
-2568,10 +2569,12 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor { void serialize() const; }; - - #include -template +template virtual class NonlinearEquality : gtsam::NoiseModelFactor { // Constructor - forces exact evaluation NonlinearEquality(size_t j, const T& feasible); @@ -2582,7 +2585,6 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor { void serialize() const; }; - #include template virtual class RangeFactor : gtsam::NoiseModelFactor { @@ -2853,7 +2855,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor { }; #include -gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel( +gtsam::noiseModel::Isotropic* ConvertNoiseModel( gtsam::noiseModel::Base* model, size_t d); template @@ -2872,12 +2874,144 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor { Vector evaluateError(const T& R1, const T& R2); }; -virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor { - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, +#include + +virtual class ShonanFactor3 : gtsam::NoiseModelFactor { + ShonanFactor(size_t key1, size_t key2, const gtsam::Rot3 &R12, size_t p); - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, - size_t p, gtsam::noiseModel::Base* model); - Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2); + ShonanFactor(size_t key1, size_t key2, const gtsam::Rot3 &R12, + size_t p, gtsam::noiseModel::Base *model); + Vector evaluateError(const gtsam::SOn &Q1, const gtsam::SOn &Q2); +}; + +#include +template +class BinaryMeasurement { + BinaryMeasurement(size_t key1, size_t key2, const T& measured, + const gtsam::noiseModel::Base* model); + size_t key1() const; + size_t key2() const; + T measured() const; +}; + +typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; +typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; + +#include + +// TODO(frank): copy/pasta below until we have integer template paremeters in wrap! 
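// Illustrative usage only (not part of the wrapped interface): from Python,
// assuming the wrapper module imports as `gtsam` and `pose3.g2o` is a
// user-supplied dataset, the classes declared below can be driven as:
//   shonan = gtsam.ShonanAveraging3("pose3.g2o")
//   initial = shonan.initializeRandomly()
//   rotations, min_eigenvalue = shonan.run(initial, 3, 10)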
+ +class ShonanAveragingParameters2 { + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot2& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveragingParameters3 { + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot3& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveraging2 { + ShonanAveraging2(string g2oFile); + ShonanAveraging2(string g2oFile, + const gtsam::ShonanAveragingParameters2 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot2 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + +class ShonanAveraging3 { + ShonanAveraging3(string g2oFile); + ShonanAveraging3(string g2oFile, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // TODO(frank): deprecate once we land pybind wrapper + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors); + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot3 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix 
computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; }; //************************************************************************* @@ -2996,6 +3130,7 @@ class PreintegratedImuMeasurements { void resetIntegrationAndSetBias(const gtsam::imuBias::ConstantBias& biasHat); Matrix preintMeasCov() const; + Vector preintegrated() const; double deltaTij() const; gtsam::Rot3 deltaRij() const; Vector deltaPij() const; diff --git a/gtsam/3rdparty/CMakeLists.txt b/gtsam/3rdparty/CMakeLists.txt index c8fecc339..8b356393b 100644 --- a/gtsam/3rdparty/CMakeLists.txt +++ b/gtsam/3rdparty/CMakeLists.txt @@ -1,7 +1,8 @@ -# install CCOLAMD headers +# install CCOLAMD and SuiteSparse_config headers install(FILES CCOLAMD/Include/ccolamd.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/CCOLAMD) install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/SuiteSparse_config) +# install Eigen unless System Eigen is used if(NOT GTSAM_USE_SYSTEM_EIGEN) # Find plain .h files file(GLOB_RECURSE eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/Eigen/*.h") diff --git a/gtsam/3rdparty/Spectra/GenEigsBase.h b/gtsam/3rdparty/Spectra/GenEigsBase.h new file mode 100644 index 000000000..e084033d7 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsBase.h @@ -0,0 +1,482 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_BASE_H +#define GEN_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::complex, std::conj, std::norm, std::abs +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/DoubleShiftQR.h" +#include "LinAlg/UpperHessenbergEigen.h" +#include "LinAlg/Arnoldi.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for general eigen solvers, mainly for internal use. +/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as GenEigsSolver and GenEigsRealShiftSolver. 
+/// +template +class GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef ArnoldiOp ArnoldiOpType; + typedef Arnoldi ArnoldiFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + ArnoldiFac m_fac; // Arnoldi factorization + + ComplexVector m_ritz_val; // Ritz values + ComplexMatrix m_ritz_vec; // Ritz vectors + ComplexVector m_ritz_est; // last row of m_ritz_vec + +private: + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part + // Complex Ritz values have exact conjugate pairs + // So we use exact tests here + static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); } + static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); } + + // Implicitly restarted Arnoldi factorization + void restart(Index k) + { + using std::norm; + + if (k >= m_ncv) + return; + + DoubleShiftQR decomp_ds(m_ncv); + UpperHessenbergQR decomp_hb(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1])) + { + // H - mu * I = Q1 * R1 + // H <- R1 * Q1 + mu * I = Q1' * H * Q1 + // H - conj(mu) * I = Q2 * R2 + // H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2 + // + // (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R + const Scalar s = Scalar(2) * m_ritz_val[i].real(); + const Scalar t = norm(m_ritz_val[i]); + + decomp_ds.compute(m_fac.matrix_H(), s, t); + + // Q -> Q * Qi + decomp_ds.apply_YQ(Q); + // H -> Q'HQ + // Matrix Q = Matrix::Identity(m_ncv, m_ncv); + // decomp_ds.apply_YQ(Q); + // m_fac_H = Q.transpose() * m_fac_H * Q; + m_fac.compress_H(decomp_ds); + + i++; + } + else + { + // QR decomposition of H - mu * I, mu is real + decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real()); + + // Q -> Q * Qi + decomp_hb.apply_YQ(Q); + // H -> Q'HQ = RQ + mu * I + m_fac.compress_H(decomp_hb); + } + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // 
Returns the adjusted nev for restarting + Index nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 3) + nev_new = 2; + + if (nev_new > m_ncv - 2) + nev_new = m_ncv - 2; + + // Increase nev by one if ritz_val[nev - 1] and + // ritz_val[nev] are conjugate pairs + if (is_complex(m_ritz_val[nev_new - 1]) && + is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new])) + { + nev_new++; + } + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + UpperHessenbergEigen decomp(m_fac.matrix_H()); + const ComplexVector& evals = decomp.eigenvalues(); + ComplexMatrix evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_MAGN: + break; + case LARGEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case LARGEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + ComplexVector new_ritz_val(m_ncv); + ComplexMatrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + + m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + +public: + /// \cond + + GenEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? 
m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 2) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix"); + + if (ncv < nev + 2 || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~GenEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. + /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Arnoldi factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. + /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. + /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_MAGN`, `Spectra::LARGEST_REAL`, + /// `Spectra::LARGEST_IMAG`, `Spectra::SMALLEST_MAGN`, + /// `Spectra::SMALLEST_REAL` and `Spectra::SMALLEST_IMAG`, + /// for example `LARGEST_MAGN` indicates that eigenvalues + /// with largest magnitude come first. + /// Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of GenEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_MAGN) + { + // The m-step Arnoldi factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. 
+ /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A complex-valued vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector, ...>`, depending on + /// the template parameter `Scalar` defined. + /// + ComplexVector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + ComplexVector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A complex-valued matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix, ...>`, + /// depending on the template parameter `Scalar` defined. + /// + ComplexMatrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + ComplexMatrix res(m_n, nvec); + + if (!nvec) + return res; + + ComplexMatrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. + /// + ComplexMatrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h new file mode 100644 index 000000000..da7c1b422 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h @@ -0,0 +1,157 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_COMPLEX_SHIFT_SOLVER_H +#define GEN_EIGS_COMPLEX_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenComplexShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a complex shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the DenseGenComplexShiftSolve wrapper class, or define their +/// own that implements all the public member functions as in +/// DenseGenComplexShiftSolve. 
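A usage sketch for the complex-shift solver documented above, assuming the vendored headers are reachable as <Spectra/...> on the include path (the include layout is not shown in this diff):

#include <iostream>
#include <Eigen/Core>
#include <Spectra/GenEigsComplexShiftSolver.h>        // include paths assumed
#include <Spectra/MatOp/DenseGenComplexShiftSolve.h>

int main()
{
    // Find the eigenvalues of a random 10x10 matrix closest to the shift 1 + 0.5i
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);

    Spectra::DenseGenComplexShiftSolve<double> op(M);
    // Request nev = 3 eigenvalues with a Krylov subspace of dimension ncv = 6;
    // the last two arguments are the real and imaginary parts of the shift.
    Spectra::GenEigsComplexShiftSolver<double, Spectra::LARGEST_MAGN,
                                       Spectra::DenseGenComplexShiftSolve<double> >
        eigs(&op, 3, 6, 1.0, 0.5);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;  // eigenvalues of M, already transformed back
    return 0;
}

Note that sort_ritzpair() in the class below undoes the shift-and-invert transform, so eigenvalues() reports eigenvalues of the original matrix rather than of the inverted operator.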
+/// +template > +class GenEigsComplexShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix ComplexVector; + + const Scalar m_sigmar; + const Scalar m_sigmai; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + using std::abs; + using std::sqrt; + using std::norm; + + // The eigenvalues we get from the iteration is + // nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma))) + // So the eigenvalues of the original problem is + // 1 \pm sqrt(1 - 4 * nu^2 * sigmai^2) + // lambda = sigmar + ----------------------------------- + // 2 * nu + // We need to pick the correct root + // Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj + // and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj + // where r is any shift value. + // We can use this identity to determine lambdaj + // + // op(v) computes Re(inv(A - r * I) * v) for any real v + // If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj), + // then op(vj) = op(a) + op(b) * i + // By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine + // which one is the correct root + + // Select a random shift value + SimpleRandom rng(0); + const Scalar shiftr = rng.random() * m_sigmar + rng.random(); + const Complex shift = Complex(shiftr, Scalar(0)); + this->m_op->set_shift(shiftr, Scalar(0)); + + // Calculate inv(A - r * I) * vj + Vector v_real(this->m_n), v_imag(this->m_n), OPv_real(this->m_n), OPv_imag(this->m_n); + const Scalar eps = Eigen::NumTraits::epsilon(); + for (Index i = 0; i < this->m_nev; i++) + { + v_real.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).real(); + v_imag.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).imag(); + this->m_op->perform_op(v_real.data(), OPv_real.data()); + this->m_op->perform_op(v_imag.data(), OPv_imag.data()); + + // Two roots computed from the quadratic equation + const Complex nu = this->m_ritz_val[i]; + const Complex root_part1 = m_sigmar + Scalar(0.5) / nu; + const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu; + const Complex root1 = root_part1 + root_part2; + const Complex root2 = root_part1 - root_part2; + + // Test roots + Scalar err1 = Scalar(0), err2 = Scalar(0); + for (int k = 0; k < this->m_n; k++) + { + const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift); + const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift); + const Complex OPv = Complex(OPv_real[k], OPv_imag[k]); + err1 += norm(OPv - rhs1); + err2 += norm(OPv - rhs2); + } + + const Complex lambdaj = (err1 < err2) ? root1 : root2; + this->m_ritz_val[i] = lambdaj; + + if (abs(Eigen::numext::imag(lambdaj)) > eps) + { + this->m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj); + i++; + } + else + { + this->m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0)); + } + } + + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. This class should implement + /// the complex shift-solve operation of \f$A\f$: calculating + /// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. 
Users could either + /// create the object from the DenseGenComplexShiftSolve wrapper class, or + /// define their own that implements all the public member functions + /// as in DenseGenComplexShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigmar The real part of the shift. + /// \param sigmai The imaginary part of the shift. + /// + GenEigsComplexShiftSolver(OpType* op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) : + GenEigsBase(op, NULL, nev, ncv), + m_sigmar(sigmar), m_sigmai(sigmai) + { + this->m_op->set_shift(m_sigmar, m_sigmai); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_COMPLEX_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h new file mode 100644 index 000000000..6f28308f1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h @@ -0,0 +1,89 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_REAL_SHIFT_SOLVER_H +#define GEN_EIGS_REAL_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenRealShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a real shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenRealShiftSolve and +/// SparseGenRealShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseGenRealShiftSolve. +/// +template > +class GenEigsRealShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Array ComplexArray; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + // The eigenvalues we get from the iteration is nu = 1 / (lambda - sigma) + // So the eigenvalues of the original problem is lambda = 1 / nu + sigma + ComplexArray ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = ritz_val_org; + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. 
This class should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseGenRealShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseGenRealShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigma The real-valued shift. + /// + GenEigsRealShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + GenEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_REAL_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsSolver.h b/gtsam/3rdparty/Spectra/GenEigsSolver.h new file mode 100644 index 000000000..3ead1dc4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsSolver.h @@ -0,0 +1,158 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_SOLVER_H +#define GEN_EIGS_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix. +/// +/// Most of the background information documented in the SymEigsSolver class +/// also applies to the GenEigsSolver class here, except that the eigenvalues +/// and eigenvectors of a general matrix can now be complex-valued. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenMatProd and +/// SparseGenMatProd, or define their +/// own that implements all the public member functions as in +/// DenseGenMatProd. 
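Before the GenEigsSolver examples that follow, here is the analogous calling pattern for the GenEigsRealShiftSolver defined just above; include paths are again assumed:

#include <iostream>
#include <Eigen/Core>
#include <Spectra/GenEigsRealShiftSolver.h>          // include paths assumed
#include <Spectra/MatOp/DenseGenRealShiftSolve.h>

int main()
{
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);

    // Shift-solve operator: applies (M - sigma * I)^{-1} to a vector
    Spectra::DenseGenRealShiftSolve<double> op(M);

    // nev = 3 eigenvalues closest to sigma = 2.0, Krylov subspace of dimension ncv = 6
    Spectra::GenEigsRealShiftSolver<double, Spectra::LARGEST_MAGN,
                                    Spectra::DenseGenRealShiftSolve<double> >
        eigs(&op, 3, 6, 2.0);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;  // eigenvalues of M near the shift
    return 0;
}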
+/// +/// An example that illustrates the usage of GenEigsSolver is give below: +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to calculate the eigenvalues of M +/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10); +/// +/// // Construct matrix operation object using the wrapper class +/// DenseGenMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest +/// // (in magnitude, or norm) three eigenvalues +/// GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXcd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +/// +/// And also an example for sparse matrices: +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal, +/// // and 3 on the above-main subdiagonal +/// const int n = 10; +/// Eigen::SparseMatrix M(n, n); +/// M.reserve(Eigen::VectorXi::Constant(n, 3)); +/// for(int i = 0; i < n; i++) +/// { +/// M.insert(i, i) = 1.0; +/// if(i > 0) +/// M.insert(i - 1, i) = 3.0; +/// if(i < n - 1) +/// M.insert(i + 1, i) = 2.0; +/// } +/// +/// // Construct matrix operation object using the wrapper class SparseGenMatProd +/// SparseGenMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest three eigenvalues +/// GenEigsSolver< double, LARGEST_MAGN, SparseGenMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXcd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +template > +class GenEigsSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseGenMatProd, or + /// define their own that implements all the public member functions + /// as in DenseGenMatProd. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. 
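The nev/ncv constraints repeated throughout these constructors (1 <= nev <= n - 2, nev + 2 <= ncv <= n, with ncv >= 2 * nev + 1 advised) are easy to violate; a tiny helper, offered only as a sketch, that clamps the advised value into the legal range:

#include <algorithm>
#include <Eigen/Core>

// Pick a Krylov dimension for problem size n and nev requested eigenvalues,
// following the "ncv >= 2 * nev + 1" advice while keeping nev + 2 <= ncv <= n.
inline Eigen::Index choose_ncv(Eigen::Index nev, Eigen::Index n)
{
    const Eigen::Index advised = 2 * nev + 1;
    return std::min(n, std::max(advised, nev + 2));
}

With n = 10 and nev = 3 this yields ncv = 7, slightly more generous than the ncv = 6 used in the examples above.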
+ /// + GenEigsSolver(OpType* op, Index nev, Index ncv) : + GenEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // GEN_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h new file mode 100644 index 000000000..730ba9556 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h @@ -0,0 +1,284 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ARNOLDI_H +#define ARNOLDI_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "../MatOp/internal/ArnoldiOp.h" +#include "../Util/TypeTraits.h" +#include "../Util/SimpleRandom.h" +#include "UpperHessenbergQR.h" +#include "DoubleShiftQR.h" + +namespace Spectra { + +// Arnoldi factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + +protected: + // clang-format off + ArnoldiOpType m_op; // Operators for the Arnoldi factorization + + const Index m_n; // dimension of A + const Index m_m; // maximum dimension of subspace V + Index m_k; // current dimension of subspace V + + Matrix m_fac_V; // V matrix in the Arnoldi factorization + Matrix m_fac_H; // H matrix in the Arnoldi factorization + Vector m_fac_f; // residual in the Arnoldi factorization + Scalar m_beta; // ||f||, B-norm of f + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + // clang-format on + + // Given orthonormal basis functions V, find a nonzero vector f such that V'Bf = 0 + // Assume that f has been properly allocated + void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm) + { + using std::sqrt; + + const Scalar thresh = m_eps * sqrt(Scalar(m_n)); + Vector Vf(V.cols()); + for (Index iter = 0; iter < 5; iter++) + { + // Randomly generate a new vector and orthogonalize it against V + SimpleRandom rng(seed + 123 * iter); + f.noalias() = rng.random_vec(m_n); + // f <- f - V * V'Bf, so that f is orthogonal to V in B-norm + m_op.trans_product(V, f, Vf); + f.noalias() -= V * Vf; + // fnorm <- ||f|| + fnorm = m_op.norm(f); + + // If fnorm is too close to zero, we try a new random vector, + // otherwise return the result + if (fnorm >= thresh) + return; + } + } + +public: + Arnoldi(const ArnoldiOpType& op, Index m) : + m_op(op), m_n(op.rows()), m_m(m), m_k(0), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()) + {} + + virtual ~Arnoldi() {} + + // Const-reference to internal structures + const Matrix& matrix_V() const { return m_fac_V; } + const Matrix& matrix_H() const { return m_fac_H; } + const Vector& vector_f() const { return m_fac_f; } + Scalar f_norm() const { return m_beta; } + Index subspace_dim() const { return m_k; } + + // Initialize with an operator and an initial vector + void init(MapConstVec& v0, Index& op_counter) + { + m_fac_V.resize(m_n, m_m); + 
m_fac_H.resize(m_m, m_m); + m_fac_f.resize(m_n); + m_fac_H.setZero(); + + // Verify the initial vector + const Scalar v0norm = m_op.norm(v0); + if (v0norm < m_near_0) + throw std::invalid_argument("initial residual vector cannot be zero"); + + // Points to the first column of V + MapVec v(m_fac_V.data(), m_n); + + // Normalize + v.noalias() = v0 / v0norm; + + // Compute H and f + Vector w(m_n); + m_op.perform_op(v.data(), w.data()); + op_counter++; + + m_fac_H(0, 0) = m_op.inner_product(v, w); + m_fac_f.noalias() = w - v * m_fac_H(0, 0); + + // In some cases f is zero in exact arithmetics, but due to rounding errors + // it may contain tiny fluctuations. When this happens, we force f to be zero + if (m_fac_f.cwiseAbs().maxCoeff() < m_eps) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + } + else + { + m_beta = m_op.norm(m_fac_f); + } + + // Indicate that this is a step-1 factorization + m_k = 1; + } + + // Arnoldi factorization starting from step-k + virtual void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Arnoldi: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns + expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta; + + // w <- A * v, v = m_fac_V.col(i) + m_op.perform_op(&m_fac_V(0, i), w.data()); + op_counter++; + + const Index i1 = i + 1; + // First i+1 columns of V + MapConstMat Vs(m_fac_V.data(), m_n, i1); + // h = m_fac_H(0:i, i) + MapVec h(&m_fac_H(0, i), i1); + // h <- V'Bw + m_op.trans_product(Vs, w, h); + + // f <- w - V * h + m_fac_f.noalias() = w - Vs * h; + m_beta = m_op.norm(m_fac_f); + + if (m_beta > Scalar(0.717) * m_op.norm(h)) + continue; + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. 
+ if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + h.noalias() += Vf.head(i1); + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a double shift QR decomposition + void compress_H(const DoubleShiftQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k -= 2; + } + + // Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition + void compress_H(const UpperHessenbergQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } + + // Apply V -> VQ and compute the new f. + // Should be called after compress_H(), since m_k is updated there. + // Only need to update the first k+1 columns of V + // The first (m - k + i) elements of the i-th column of Q are non-zero, + // and the rest are zero + void compress_V(const Matrix& Q) + { + Matrix Vs(m_n, m_k + 1); + for (Index i = 0; i < m_k; i++) + { + const Index nnz = m_m - m_k + i + 1; + MapConstVec q(&Q(0, i), nnz); + Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q; + } + Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k); + m_fac_V.leftCols(m_k + 1).noalias() = Vs; + + Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1); + m_fac_f.swap(fk); + m_beta = m_op.norm(m_fac_f); + } +}; + +} // namespace Spectra + +#endif // ARNOLDI_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h new file mode 100644 index 000000000..2345e0fda --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h @@ -0,0 +1,530 @@ +// Copyright (C) 2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef BK_LDLT_H +#define BK_LDLT_H + +#include +#include +#include + +#include "../Util/CompInfo.h" + +namespace Spectra { + +// Bunch-Kaufman LDLT decomposition +// References: +// 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems. +// Mathematics of computation, 31(137), 163-179. +// 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4. +// 3. Bunch-Parlett diagonal pivoting +// 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers. +// SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561. 
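The class that begins below is the internal Bunch-Kaufman LDL^T factorization used for symmetric shift-and-invert operators; its public surface, visible further down, is compute(), solve(), solve_inplace() and info(). A small self-contained sketch (the header path is assumed):

#include <iostream>
#include <Eigen/Core>
#include <Spectra/LinAlg/BKLDLT.h>   // internal header; path assumed

int main()
{
    const int n = 5;
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd S = A + A.transpose();            // symmetric, possibly indefinite
    Eigen::VectorXd b = Eigen::VectorXd::Random(n);

    // Factorize S - sigma * I from its lower triangle, then solve (S - sigma * I) x = b
    const double sigma = 0.5;
    Spectra::BKLDLT<double> ldlt(S, Eigen::Lower, sigma);
    if (ldlt.info() != Spectra::SUCCESSFUL)
        return 1;                                      // the shifted matrix is numerically singular

    Eigen::VectorXd x = ldlt.solve(b);
    std::cout << "residual = "
              << ((S - sigma * Eigen::MatrixXd::Identity(n, n)) * x - b).norm() << std::endl;
    return 0;
}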
+template +class BKLDLT +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef Eigen::Matrix IntVector; + typedef Eigen::Ref GenericVector; + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + typedef const Eigen::Ref ConstGenericVector; + + Index m_n; + Vector m_data; // storage for a lower-triangular matrix + std::vector m_colptr; // pointers to columns + IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5 + std::vector > m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)] + + bool m_computed; + int m_info; + + // Access to elements + // Pointer to the k-th column + Scalar* col_pointer(Index k) { return m_colptr[k]; } + // A[i, j] -> m_colptr[j][i - j], i >= j + Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; } + const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; } + // A[i, i] -> m_colptr[i][0] + Scalar& diag_coeff(Index i) { return m_colptr[i][0]; } + const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; } + + // Compute column pointers + void compute_pointer() + { + m_colptr.clear(); + m_colptr.reserve(m_n); + Scalar* head = m_data.data(); + + for (Index i = 0; i < m_n; i++) + { + m_colptr.push_back(head); + head += (m_n - i); + } + } + + // Copy mat - shift * I to m_data + void copy_data(ConstGenericMatrix& mat, int uplo, const Scalar& shift) + { + if (uplo == Eigen::Lower) + { + for (Index j = 0; j < m_n; j++) + { + const Scalar* begin = &mat.coeffRef(j, j); + const Index len = m_n - j; + std::copy(begin, begin + len, col_pointer(j)); + diag_coeff(j) -= shift; + } + } + else + { + Scalar* dest = m_data.data(); + for (Index i = 0; i < m_n; i++) + { + for (Index j = i; j < m_n; j++, dest++) + { + *dest = mat.coeff(i, j); + } + diag_coeff(i) -= shift; + } + } + } + + // Compute compressed permutations + void compress_permutation() + { + for (Index i = 0; i < m_n; i++) + { + // Recover the permutation action + const Index perm = (m_perm[i] >= 0) ? 
(m_perm[i]) : (-m_perm[i] - 1); + if (perm != i) + m_permc.push_back(std::make_pair(i, perm)); + } + } + + // Working on the A[k:end, k:end] submatrix + // Exchange k <-> r + // Assume r >= k + void pivoting_1x1(Index k, Index r) + { + // No permutation + if (k == r) + { + m_perm[k] = r; + return; + } + + // A[k, k] <-> A[r, r] + std::swap(diag_coeff(k), diag_coeff(r)); + + // A[(r+1):end, k] <-> A[(r+1):end, r] + std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r)); + + // A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)] + Scalar* src = &coeff(k + 1, k); + for (Index j = k + 1; j < r; j++, src++) + { + std::swap(*src, coeff(r, j)); + } + + m_perm[k] = r; + } + + // Working on the A[k:end, k:end] submatrix + // Exchange [k+1, k] <-> [r, p] + // Assume p >= k, r >= k+1 + void pivoting_2x2(Index k, Index r, Index p) + { + pivoting_1x1(k, p); + pivoting_1x1(k + 1, r); + + // A[k+1, k] <-> A[r, k] + std::swap(coeff(k + 1, k), coeff(r, k)); + + // Use negative signs to indicate a 2x2 block + // Also minus one to distinguish a negative zero from a positive zero + m_perm[k] = -m_perm[k] - 1; + m_perm[k + 1] = -m_perm[k + 1] - 1; + } + + // A[r1, c1:c2] <-> A[r2, c1:c2] + // Assume r2 >= r1 > c2 >= c1 + void interchange_rows(Index r1, Index r2, Index c1, Index c2) + { + if (r1 == r2) + return; + + for (Index j = c1; j <= c2; j++) + { + std::swap(coeff(r1, j), coeff(r2, j)); + } + } + + // lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|} + // Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix + // r is the row index + // Assume k < end + Scalar find_lambda(Index k, Index& r) + { + using std::abs; + + const Scalar* head = col_pointer(k); // => A[k, k] + const Scalar* end = col_pointer(k + 1); + // Start with r=k+1, lambda=A[k+1, k] + r = k + 1; + Scalar lambda = abs(head[1]); + // Scan remaining elements + for (const Scalar* ptr = head + 2; ptr < end; ptr++) + { + const Scalar abs_elem = abs(*ptr); + if (lambda < abs_elem) + { + lambda = abs_elem; + r = k + (ptr - head); + } + } + + return lambda; + } + + // sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]} + // Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix + // p is the row index + // Assume k < r < end + Scalar find_sigma(Index k, Index r, Index& p) + { + using std::abs; + + // First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda() + // If r == end, we skip this search + Scalar sigma = Scalar(-1); + if (r < m_n - 1) + sigma = find_lambda(r, p); + + // Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1] + for (Index j = k; j < r; j++) + { + const Scalar abs_elem = abs(coeff(r, j)); + if (sigma < abs_elem) + { + sigma = abs_elem; + p = j; + } + } + + return sigma; + } + + // Generate permutations and apply to A + // Return true if the resulting pivoting is 1x1, and false if 2x2 + bool permutate_mat(Index k, const Scalar& alpha) + { + using std::abs; + + Index r = k, p = k; + const Scalar lambda = find_lambda(k, r); + + // If lambda=0, no need to interchange + if (lambda > Scalar(0)) + { + const Scalar abs_akk = abs(diag_coeff(k)); + // If |A[k, k]| >= alpha * lambda, no need to interchange + if (abs_akk < alpha * lambda) + { + const Scalar sigma = find_sigma(k, r, p); + + // If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange + if (sigma * abs_akk < alpha * lambda * lambda) + { + if (abs_akk >= alpha * sigma) + { + // Permutation on A + pivoting_1x1(k, r); + + // 
Permutation on L + interchange_rows(k, r, 0, k - 1); + return true; + } + else + { + // There are two versions of permutation here + // 1. A[k+1, k] <-> A[r, k] + // 2. A[k+1, k] <-> A[r, p], where p >= k and r >= k+1 + // + // Version 1 and 2 are used by Ref[1] and Ref[2], respectively + + // Version 1 implementation + p = k; + + // Version 2 implementation + // [r, p] and [p, r] are symmetric, but we need to make sure + // p >= k and r >= k+1, so it is safe to always make r > p + // One exception is when min{r,p} == k+1, in which case we make + // r = k+1, so that only one permutation needs to be performed + /* const Index rp_min = std::min(r, p); + const Index rp_max = std::max(r, p); + if(rp_min == k + 1) + { + r = rp_min; p = rp_max; + } else { + r = rp_max; p = rp_min; + } */ + + // Right now we use Version 1 since it reduces the overhead of interchange + + // Permutation on A + pivoting_2x2(k, r, p); + // Permutation on L + interchange_rows(k, p, 0, k - 1); + interchange_rows(k + 1, r, 0, k - 1); + return false; + } + } + } + } + + return true; + } + + // E = [e11, e12] + // [e21, e22] + // Overwrite E with inv(E) + void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const + { + // inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta + // [d21, d22] + const Scalar delta = e11 * e22 - e21 * e21; + std::swap(e11, e22); + e11 /= delta; + e22 /= delta; + e21 = -e21 / delta; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int gaussian_elimination_1x1(Index k) + { + // D = 1 / A[k, k] + const Scalar akk = diag_coeff(k); + // Return NUMERICAL_ISSUE if not invertible + if (akk == Scalar(0)) + return NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / akk; + + // B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k] + Scalar* lptr = col_pointer(k) + 1; + const Index ldim = m_n - k - 1; + MapVec l(lptr, ldim); + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j); + } + + // l /= A[k, k] + l /= akk; + + return SUCCESSFUL; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int gaussian_elimination_2x2(Index k) + { + // D = inv(E) + Scalar& e11 = diag_coeff(k); + Scalar& e21 = coeff(k + 1, k); + Scalar& e22 = diag_coeff(k + 1); + // Return NUMERICAL_ISSUE if not invertible + if (e11 * e22 - e21 * e21 == Scalar(0)) + return NUMERICAL_ISSUE; + + inverse_inplace_2x2(e11, e21, e22); + + // X = l * inv(E), l := L[(k+2):end, k:(k+1)] + Scalar* l1ptr = &coeff(k + 2, k); + Scalar* l2ptr = &coeff(k + 2, k + 1); + const Index ldim = m_n - k - 2; + MapVec l1(l1ptr, ldim), l2(l2ptr, ldim); + + Eigen::Matrix X(ldim, 2); + X.col(0).noalias() = l1 * e11 + l2 * e21; + X.col(1).noalias() = l1 * e21 + l2 * e22; + + // B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end] + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]); + } + + // l = X + l1.noalias() = X.col(0); + l2.noalias() = X.col(1); + + return SUCCESSFUL; + } + +public: + BKLDLT() : + m_n(0), m_computed(false), m_info(NOT_COMPUTED) + {} + + // Factorize mat - shift * I + BKLDLT(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), m_computed(false), m_info(NOT_COMPUTED) + { + compute(mat, uplo, shift); + } + + void compute(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) + { + using std::abs; 
+ + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("BKLDLT: matrix must be square"); + + m_perm.setLinSpaced(m_n, 0, m_n - 1); + m_permc.clear(); + + // Copy data + m_data.resize((m_n * (m_n + 1)) / 2); + compute_pointer(); + copy_data(mat, uplo, shift); + + const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0; + Index k = 0; + for (k = 0; k < m_n - 1; k++) + { + // 1. Interchange rows and columns of A, and save the result to m_perm + bool is_1x1 = permutate_mat(k, alpha); + + // 2. Gaussian elimination + if (is_1x1) + { + m_info = gaussian_elimination_1x1(k); + } + else + { + m_info = gaussian_elimination_2x2(k); + k++; + } + + // 3. Check status + if (m_info != SUCCESSFUL) + break; + } + // Invert the last 1x1 block if it exists + if (k == m_n - 1) + { + const Scalar akk = diag_coeff(k); + if (akk == Scalar(0)) + m_info = NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / diag_coeff(k); + } + + compress_permutation(); + + m_computed = true; + } + + // Solve Ax=b + void solve_inplace(GenericVector b) const + { + if (!m_computed) + throw std::logic_error("BKLDLT: need to call compute() first"); + + // PAP' = LDL' + // 1. b -> Pb + Scalar* x = b.data(); + MapVec res(x, m_n); + Index npermc = m_permc.size(); + for (Index i = 0; i < npermc; i++) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + + // 2. Lz = Pb + // If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2 + const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); + for (Index i = 0; i <= end; i++) + { + const Index b1size = m_n - i - 1; + const Index b2size = b1size - 1; + if (m_perm[i] >= 0) + { + MapConstVec l(&coeff(i + 1, i), b1size); + res.segment(i + 1, b1size).noalias() -= l * x[i]; + } + else + { + MapConstVec l1(&coeff(i + 2, i), b2size); + MapConstVec l2(&coeff(i + 2, i + 1), b2size); + res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]); + i++; + } + } + + // 3. Dw = z + for (Index i = 0; i < m_n; i++) + { + const Scalar e11 = diag_coeff(i); + if (m_perm[i] >= 0) + { + x[i] *= e11; + } + else + { + const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1); + const Scalar wi = x[i] * e11 + x[i + 1] * e21; + x[i + 1] = x[i] * e21 + x[i + 1] * e22; + x[i] = wi; + i++; + } + } + + // 4. L'y = w + // If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2 + Index i = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); + for (; i >= 0; i--) + { + const Index ldim = m_n - i - 1; + MapConstVec l(&coeff(i + 1, i), ldim); + x[i] -= res.segment(i + 1, ldim).dot(l); + + if (m_perm[i] < 0) + { + MapConstVec l2(&coeff(i + 1, i - 1), ldim); + x[i - 1] -= res.segment(i + 1, ldim).dot(l2); + i--; + } + } + + // 5. x = P'y + for (Index i = npermc - 1; i >= 0; i--) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + } + + Vector solve(ConstGenericVector& b) const + { + Vector res = b; + solve_inplace(res); + return res; + } + + int info() const { return m_info; } +}; + +} // namespace Spectra + +#endif // BK_LDLT_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h new file mode 100644 index 000000000..2845688b4 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h @@ -0,0 +1,384 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
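The class that starts below implements the implicit double-shift QR step consumed by GenEigsBase::restart() earlier in this diff. For a complex Ritz value \mu used as shift, restart() passes s = 2\,\mathrm{Re}(\mu) and t = |\mu|^2, so that

    (H - \mu I)(H - \bar{\mu} I) = H^2 - s H + t I,

and the whole step can be carried out in real arithmetic on the Hessenberg matrix H.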
+ +#ifndef DOUBLE_SHIFT_QR_H +#define DOUBLE_SHIFT_QR_H + +#include +#include // std::vector +#include // std::min, std::fill, std::copy +#include // std::abs, std::sqrt, std::pow +#include // std::invalid_argument, std::logic_error + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class DoubleShiftQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Matrix3X; + typedef Eigen::Matrix Vector; + typedef Eigen::Array IntArray; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; // Dimension of the matrix + Matrix m_mat_H; // A copy of the matrix to be factorized + Scalar m_shift_s; // Shift constant + Scalar m_shift_t; // Shift constant + Matrix3X m_ref_u; // Householder reflectors + IntArray m_ref_nr; // How many rows does each reflector affects + // 3 - A general reflector + // 2 - A Givens rotation + // 1 - An identity transformation + const Scalar m_near_0; // a very small value, but 1.0 / m_safe_min does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, + // e.g. ~= 1e-16 for the "double" type + const Scalar m_eps_rel; + const Scalar m_eps_abs; + bool m_computed; // Whether matrix has been factorized + + void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind) + { + using std::abs; + + Scalar* u = &m_ref_u.coeffRef(0, ind); + unsigned char* nr = m_ref_nr.data(); + // In general case the reflector affects 3 rows + nr[ind] = 3; + Scalar x2x3 = Scalar(0); + // If x3 is zero, decrease nr by 1 + if (abs(x3) < m_near_0) + { + // If x2 is also zero, nr will be 1, and we can exit this function + if (abs(x2) < m_near_0) + { + nr[ind] = 1; + return; + } + else + { + nr[ind] = 2; + } + x2x3 = abs(x2); + } + else + { + x2x3 = Eigen::numext::hypot(x2, x3); + } + + // x1' = x1 - rho * ||x|| + // rho = -sign(x1), if x1 == 0, we choose rho = 1 + Scalar x1_new = x1 - ((x1 <= 0) - (x1 > 0)) * Eigen::numext::hypot(x1, x2x3); + Scalar x_norm = Eigen::numext::hypot(x1_new, x2x3); + // Double check the norm of new x + if (x_norm < m_near_0) + { + nr[ind] = 1; + return; + } + u[0] = x1_new / x_norm; + u[1] = x2 / x_norm; + u[2] = x3 / x_norm; + } + + void compute_reflector(const Scalar* x, Index ind) + { + compute_reflector(x[0], x[1], x[2], ind); + } + + // Update the block X = H(il:iu, il:iu) + void update_block(Index il, Index iu) + { + // Block size + const Index bsize = iu - il + 1; + + // If block size == 1, there is no need to apply reflectors + if (bsize == 1) + { + m_ref_nr.coeffRef(il) = 1; + return; + } + + const Scalar x00 = m_mat_H.coeff(il, il), + x01 = m_mat_H.coeff(il, il + 1), + x10 = m_mat_H.coeff(il + 1, il), + x11 = m_mat_H.coeff(il + 1, il + 1); + // m00 = x00 * (x00 - s) + x01 * x10 + t + const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t; + // m10 = x10 * (x00 + x11 - s) + const Scalar m10 = x10 * (x00 + x11 - m_shift_s); + + // For block size == 2, do a Givens rotation on M = X * X - s * X + t * I + if (bsize == 2) + { + // This causes nr=2 + compute_reflector(m00, m10, 0, il); + // Apply the reflector to X + apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il); + + m_ref_nr.coeffRef(il + 1) = 1; + return; + } + + // For block size >=3, use the regular strategy + // m20 = x21 * x10 + const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il); + compute_reflector(m00, m10, m20, il); + + // Apply the first 
reflector + apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + std::min(bsize, Index(4)), 3), m_n, il); + + // Calculate the following reflectors + // If entering this loop, block size is at least 4. + for (Index i = 1; i < bsize - 2; i++) + { + compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i); + // Apply the reflector to X + apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i); + apply_XP(m_mat_H.block(0, il + i, il + std::min(bsize, Index(i + 4)), 3), m_n, il + i); + } + + // The last reflector + // This causes nr=2 + compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1); + // Apply the reflector to X + apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1); + apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1); + + m_ref_nr.coeffRef(iu) = 1; + } + + // P = I - 2 * u * u' = P' + // PX = X - 2 * u * (u'X) + void apply_PX(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const Index nrow = X.rows(); + const Index ncol = X.cols(); + + Scalar* xptr = X.data(); + if (nr == 2 || nrow == 2) + { + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + } + } + else + { + const Scalar u2 = m_ref_u.coeff(2, u_ind); + const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + xptr[2] -= tmp * u2; + } + } + } + + // x is a pointer to a vector + // Px = x - 2 * dot(x, u) * u + void apply_PX(Scalar* x, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind), + u2 = m_ref_u.coeff(2, u_ind); + + // When the reflector only contains two elements, u2 has been set to zero + const bool nr_is_2 = (nr == 2); + const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 
0 : (x[2] * u2))); + x[0] -= dot2 * u0; + x[1] -= dot2 * u1; + if (!nr_is_2) + x[2] -= dot2 * u2; + } + + // XP = X - 2 * (X * u) * u' + void apply_XP(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const int nrow = X.rows(); + const int ncol = X.cols(); + Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1) + + if (nr == 2 || ncol == 2) + { + // tmp = 2 * u0 * X0 + 2 * u1 * X1 + // X0 => X0 - u0 * tmp + // X1 => X1 - u1 * tmp + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + } + } + else + { + Scalar* X2 = X1 + stride; // X2 => X.col(2) + const Scalar u2 = m_ref_u.coeff(2, u_ind); + const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + X2[i] -= tmp * u2; + } + } + } + +public: + DoubleShiftQR(Index size) : + m_n(size), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + {} + + DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) : + m_n(mat.rows()), + m_mat_H(m_n, m_n), + m_shift_s(s), + m_shift_t(t), + m_ref_u(3, m_n), + m_ref_nr(m_n), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + { + compute(mat, s, t); + } + + void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("DoubleShiftQR: matrix must be square"); + + m_mat_H.resize(m_n, m_n); + m_shift_s = s; + m_shift_t = t; + m_ref_u.resize(3, m_n); + m_ref_nr.resize(m_n); + + // Make a copy of mat + std::copy(mat.data(), mat.data() + mat.size(), m_mat_H.data()); + + // Obtain the indices of zero elements in the subdiagonal, + // so that H can be divided into several blocks + std::vector zero_ind; + zero_ind.reserve(m_n - 1); + zero_ind.push_back(0); + Scalar* Hii = m_mat_H.data(); + for (Index i = 0; i < m_n - 2; i++, Hii += (m_n + 1)) + { + // Hii[1] => m_mat_H(i + 1, i) + const Scalar h = abs(Hii[1]); + if (h <= 0 || h <= m_eps_rel * (abs(Hii[0]) + abs(Hii[m_n + 1]))) + { + Hii[1] = 0; + zero_ind.push_back(i + 1); + } + // Make sure m_mat_H is upper Hessenberg + // Zero the elements below m_mat_H(i + 1, i) + std::fill(Hii + 2, Hii + m_n - i, Scalar(0)); + } + zero_ind.push_back(m_n); + + for (std::vector::size_type i = 0; i < zero_ind.size() - 1; i++) + { + const Index start = zero_ind[i]; + const Index end = zero_ind[i + 1] - 1; + // Compute refelctors and update each block + update_block(start, end); + } + + m_computed = true; + } + + void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + dest.noalias() = m_mat_H; + } + + // Q = P0 * P1 * ... + // Q'y = P_{n-2} * ... 
* P1 * P0 * y + void apply_QtY(Vector& y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + Scalar* y_ptr = y.data(); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++, y_ptr++) + { + apply_PX(y_ptr, i); + } + } + + // Q = P0 * P1 * ... + // YQ = Y * P0 * P1 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + const Index nrow = Y.rows(); + const Index n2 = m_n - 2; + for (Index i = 0; i < n2; i++) + { + apply_XP(Y.block(0, i, nrow, 3), nrow, i); + } + apply_XP(Y.block(0, n2, nrow, 2), nrow, n2); + } +}; + +} // namespace Spectra + +#endif // DOUBLE_SHIFT_QR_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h new file mode 100644 index 000000000..53cf52be8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h @@ -0,0 +1,167 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef LANCZOS_H +#define LANCZOS_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "Arnoldi.h" + +namespace Spectra { + +// Lanczos factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Lanczos : public Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + + using Arnoldi::m_op; + using Arnoldi::m_n; + using Arnoldi::m_m; + using Arnoldi::m_k; + using Arnoldi::m_fac_V; + using Arnoldi::m_fac_H; + using Arnoldi::m_fac_f; + using Arnoldi::m_beta; + using Arnoldi::m_near_0; + using Arnoldi::m_eps; + +public: + Lanczos(const ArnoldiOpType& op, Index m) : + Arnoldi(op, m) + {} + + // Lanczos factorization starting from step-k + void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Lanczos: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns + this->expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column + v.noalias() = m_fac_f / m_beta; + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? 
Scalar(0) : m_beta; + + // w <- A * v + m_op.perform_op(v.data(), w.data()); + op_counter++; + + // H[i+1, i+1] = = v'Bw + m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry + m_fac_H(i, i) = m_op.inner_product(v, w); + + // f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1} + // If restarting, we know that H[i+1, i] = 0 + if (restart) + m_fac_f.noalias() = w - m_fac_H(i, i) * v; + else + m_fac_f.noalias() = w - m_fac_H(i, i - 1) * m_fac_V.col(i - 1) - m_fac_H(i, i) * v; + + m_beta = m_op.norm(m_fac_f); + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + const Index i1 = i + 1; + MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. + if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + m_fac_H(i - 1, i) += Vf[i - 1]; + m_fac_H(i, i - 1) = m_fac_H(i - 1, i); + m_fac_H(i, i) += Vf[i]; + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition + void compress_H(const TridiagQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } +}; + +} // namespace Spectra + +#endif // LANCZOS_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h new file mode 100644 index 000000000..122e0e551 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h @@ -0,0 +1,219 @@ +// The code was adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2010 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
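+
+// TridiagEigen computes the full eigen decomposition of a real symmetric tridiagonal
+// matrix using implicit-shift QR steps (adapted from Eigen's SelfAdjointEigenSolver).
+// A minimal usage sketch (illustrative), assuming a symmetric tridiagonal Eigen::MatrixXd T:
+//
+//   Spectra::TridiagEigen<double> eig(T);
+//   Eigen::VectorXd evals = eig.eigenvalues();   // all eigenvalues of T
+//   Eigen::MatrixXd evecs = eig.eigenvectors();  // corresponding eigenvectors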
+ +#ifndef TRIDIAG_EIGEN_H +#define TRIDIAG_EIGEN_H + +#include +#include +#include + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class TridiagEigen +{ +private: + typedef Eigen::Index Index; + // For convenience in adapting the tridiagonal_qr_step() function + typedef Scalar RealScalar; + + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; + Vector m_main_diag; // Main diagonal elements of the matrix + Vector m_sub_diag; // Sub-diagonal elements of the matrix + Matrix m_evecs; // To store eigenvectors + + bool m_computed; + const Scalar m_near_0; // a very small value, ~= 1e-307 for the "double" type + + // Adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h + static void tridiagonal_qr_step(RealScalar* diag, + RealScalar* subdiag, Index start, + Index end, Scalar* matrixQ, + Index n) + { + using std::abs; + + RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5); + RealScalar e = subdiag[end - 1]; + // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still + // underflow thus leading to inf/NaN values when using the following commented code: + // RealScalar e2 = numext::abs2(subdiag[end-1]); + // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); + // This explain the following, somewhat more complicated, version: + RealScalar mu = diag[end]; + if (td == Scalar(0)) + mu -= abs(e); + else + { + RealScalar e2 = Eigen::numext::abs2(subdiag[end - 1]); + RealScalar h = Eigen::numext::hypot(td, e); + if (e2 == RealScalar(0)) + mu -= (e / (td + (td > RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h); + else + mu -= e2 / (td + (td > RealScalar(0) ? 
h : -h)); + } + + RealScalar x = diag[start] - mu; + RealScalar z = subdiag[start]; + Eigen::Map q(matrixQ, n, n); + for (Index k = start; k < end; ++k) + { + Eigen::JacobiRotation rot; + rot.makeGivens(x, z); + + const RealScalar s = rot.s(); + const RealScalar c = rot.c(); + + // do T = G' T G + RealScalar sdk = s * diag[k] + c * subdiag[k]; + RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1]; + + diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]); + diag[k + 1] = s * sdk + c * dkp1; + subdiag[k] = c * sdk - s * dkp1; + + if (k > start) + subdiag[k - 1] = c * subdiag[k - 1] - s * z; + + x = subdiag[k]; + + if (k < end - 1) + { + z = -s * subdiag[k + 1]; + subdiag[k + 1] = c * subdiag[k + 1]; + } + + // apply the givens rotation to the unit matrix Q = Q * G + if (matrixQ) + q.applyOnTheRight(k, k + 1, rot); + } + } + +public: + TridiagEigen() : + m_n(0), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + {} + + TridiagEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("TridiagEigen: matrix must be square"); + + m_main_diag.resize(m_n); + m_sub_diag.resize(m_n - 1); + m_evecs.resize(m_n, m_n); + m_evecs.setIdentity(); + + // Scale matrix to improve stability + const Scalar scale = std::max(mat.diagonal().cwiseAbs().maxCoeff(), + mat.diagonal(-1).cwiseAbs().maxCoeff()); + // If scale=0, mat is a zero matrix, so we can early stop + if (scale < m_near_0) + { + // m_main_diag contains eigenvalues + m_main_diag.setZero(); + // m_evecs has been set identity + // m_evecs.setIdentity(); + m_computed = true; + return; + } + m_main_diag.noalias() = mat.diagonal() / scale; + m_sub_diag.noalias() = mat.diagonal(-1) / scale; + + Scalar* diag = m_main_diag.data(); + Scalar* subdiag = m_sub_diag.data(); + + Index end = m_n - 1; + Index start = 0; + Index iter = 0; // total number of iterations + int info = 0; // 0 for success, 1 for failure + + const Scalar considerAsZero = TypeTraits::min(); + const Scalar precision = Scalar(2) * Eigen::NumTraits::epsilon(); + + while (end > 0) + { + for (Index i = start; i < end; i++) + if (abs(subdiag[i]) <= considerAsZero || + abs(subdiag[i]) <= (abs(diag[i]) + abs(diag[i + 1])) * precision) + subdiag[i] = 0; + + // find the largest unreduced block + while (end > 0 && subdiag[end - 1] == Scalar(0)) + end--; + + if (end <= 0) + break; + + // if we spent too many iterations, we give up + iter++; + if (iter > 30 * m_n) + { + info = 1; + break; + } + + start = end - 1; + while (start > 0 && subdiag[start - 1] != Scalar(0)) + start--; + + tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n); + } + + if (info > 0) + throw std::runtime_error("TridiagEigen: eigen decomposition failed"); + + // Scale eigenvalues back + m_main_diag *= scale; + + m_computed = true; + } + + const Vector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + // After calling compute(), main_diag will contain the eigenvalues. 
+ return m_main_diag; + } + + const Matrix& eigenvectors() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + return m_evecs; + } +}; + +} // namespace Spectra + +#endif // TRIDIAG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h new file mode 100644 index 000000000..4865e9db8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h @@ -0,0 +1,319 @@ +// The code was adapted from Eigen/src/Eigenvaleus/EigenSolver.h +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010,2012 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_EIGEN_H +#define UPPER_HESSENBERG_EIGEN_H + +#include +#include +#include + +namespace Spectra { + +template +class UpperHessenbergEigen +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + Index m_n; // Size of the matrix + Eigen::RealSchur m_realSchur; // Schur decomposition solver + Matrix m_matT; // Schur T matrix + Matrix m_eivec; // Storing eigenvectors + ComplexVector m_eivalues; // Eigenvalues + + bool m_computed; + + void doComputeEigenvectors() + { + using std::abs; + + const Index size = m_eivec.cols(); + const Scalar eps = Eigen::NumTraits::epsilon(); + + // inefficient! this is already computed in RealSchur + Scalar norm(0); + for (Index j = 0; j < size; ++j) + { + norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum(); + } + + // Backsubstitute to find vectors of upper triangular form + if (norm == Scalar(0)) + return; + + for (Index n = size - 1; n >= 0; n--) + { + Scalar p = m_eivalues.coeff(n).real(); + Scalar q = m_eivalues.coeff(n).imag(); + + // Scalar vector + if (q == Scalar(0)) + { + Scalar lastr(0), lastw(0); + Index l = n; + + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 1; i >= 0; i--) + { + Scalar w = m_matT.coeff(i, i) - p; + Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastr = r; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + if (w != Scalar(0)) + m_matT.coeffRef(i, n) = -r / w; + else + m_matT.coeffRef(i, n) = -r / (eps * norm); + } + else // Solve real equations + { + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); + Scalar t = (x * lastr - lastw * r) / denom; + m_matT.coeffRef(i, n) = t; + if (abs(x) > abs(lastw)) + m_matT.coeffRef(i + 1, n) = (-r - w * t) / x; + else + m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw; + } + + // Overflow control + Scalar t = abs(m_matT.coeff(i, n)); + if ((eps * t) * t > Scalar(1)) + m_matT.col(n).tail(size - i) /= t; + } + } + } + else if (q < Scalar(0) && n > 0) + { // Complex vector + Scalar lastra(0), lastsa(0), lastw(0); + Index l = n - 1; + + // Last vector component imaginary so 
matrix is triangular + if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n))) + { + m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1); + m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / m_matT.coeff(n, n - 1); + } + else + { + Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q); + m_matT.coeffRef(n - 1, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc); + } + m_matT.coeffRef(n, n - 1) = Scalar(0); + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 2; i >= 0; i--) + { + Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1)); + Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + Scalar w = m_matT.coeff(i, i) - p; + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastra = ra; + lastsa = sa; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + Complex cc = Complex(-ra, -sa) / Complex(w, q); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + } + else + { + // Solve complex equations + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; + Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; + if ((vr == Scalar(0)) && (vi == Scalar(0))) + vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); + + Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + if (abs(x) > (abs(lastw) + abs(q))) + { + m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x; + m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x; + } + else + { + cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q); + m_matT.coeffRef(i + 1, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc); + } + } + + // Overflow control + Scalar t = std::max(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n))); + if ((eps * t) * t > Scalar(1)) + m_matT.block(i, n - 1, size - i, 2) /= t; + } + } + + // We handled a pair of complex conjugate eigenvalues, so need to skip them both + n--; + } + } + + // Back transformation to get eigenvectors of original matrix + Vector m_tmp(size); + for (Index j = size - 1; j >= 0; j--) + { + m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1); + m_eivec.col(j) = m_tmp; + } + } + +public: + UpperHessenbergEigen() : + m_n(0), m_computed(false) + {} + + UpperHessenbergEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + using std::sqrt; + + if (mat.rows() != mat.cols()) + throw std::invalid_argument("UpperHessenbergEigen: matrix must be square"); + + m_n = mat.rows(); + // Scale matrix prior to the Schur decomposition + const Scalar scale = mat.cwiseAbs().maxCoeff(); + + // Reduce to real Schur form + Matrix Q = Matrix::Identity(m_n, m_n); + m_realSchur.computeFromHessenberg(mat / scale, Q, true); + if (m_realSchur.info() != Eigen::Success) + throw std::runtime_error("UpperHessenbergEigen: eigen 
decomposition failed"); + + m_matT = m_realSchur.matrixT(); + m_eivec = m_realSchur.matrixU(); + + // Compute eigenvalues from matT + m_eivalues.resize(m_n); + Index i = 0; + while (i < m_n) + { + // Real eigenvalue + if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0)) + { + m_eivalues.coeffRef(i) = m_matT.coeff(i, i); + ++i; + } + else // Complex eigenvalues + { + Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1)); + Scalar z; + // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); + // without overflow + { + Scalar t0 = m_matT.coeff(i + 1, i); + Scalar t1 = m_matT.coeff(i, i + 1); + Scalar maxval = std::max(abs(p), std::max(abs(t0), abs(t1))); + t0 /= maxval; + t1 /= maxval; + Scalar p0 = p / maxval; + z = maxval * sqrt(abs(p0 * p0 + t0 * t1)); + } + m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z); + m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z); + i += 2; + } + } + + // Compute eigenvectors + doComputeEigenvectors(); + + // Scale eigenvalues back + m_eivalues *= scale; + + m_computed = true; + } + + const ComplexVector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + return m_eivalues; + } + + ComplexMatrix eigenvectors() + { + using std::abs; + + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + Index n = m_eivec.cols(); + ComplexMatrix matV(n, n); + for (Index j = 0; j < n; ++j) + { + // imaginary part of real eigenvalue is already set to exact zero + if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n) + { + // we have a real eigen value + matV.col(j) = m_eivec.col(j).template cast(); + matV.col(j).normalize(); + } + else + { + // we have a pair of complex eigen values + for (Index i = 0; i < n; ++i) + { + matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1)); + matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1)); + } + matV.col(j).normalize(); + matV.col(j + 1).normalize(); + ++j; + } + } + + return matV; + } +}; + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h new file mode 100644 index 000000000..5778f11dc --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h @@ -0,0 +1,670 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_QR_H +#define UPPER_HESSENBERG_QR_H + +#include +#include // std::sqrt +#include // std::fill, std::copy +#include // std::logic_error + +namespace Spectra { + +/// +/// \defgroup Internals Internal Classes +/// +/// Classes for internal use. May be useful to developers. +/// + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup LinearAlgebra Linear Algebra +/// +/// A number of classes for linear algebra operations. +/// + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of an upper Hessenberg matrix. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. 
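+///
+/// A minimal usage sketch (illustrative), assuming an upper Hessenberg `Eigen::MatrixXd H`
+/// and a real shift `s`:
+/// \code
+/// Spectra::UpperHessenbergQR<double> decomp(H, s);  // H - s * I = Q * R
+/// Eigen::MatrixXd R = decomp.matrix_R();            // upper triangular factor R
+/// Eigen::MatrixXd HQ;
+/// decomp.matrix_QtHQ(HQ);                           // HQ = R * Q + s * I = Q' * H * Q
+/// \endcode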
+/// +template +class UpperHessenbergQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix RowVector; + typedef Eigen::Array Array; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Matrix m_mat_T; + +protected: + Index m_n; + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + // Q = G1 * G2 * ... * G_{n-1} + Scalar m_shift; + Array m_rot_cos; + Array m_rot_sin; + bool m_computed; + + // Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r + // If both x and y are zero, set c = 1 and s = 0 + // We must implement it in a numerically stable way + static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s) + { + using std::sqrt; + + const Scalar xsign = (x > Scalar(0)) - (x < Scalar(0)); + const Scalar ysign = (y > Scalar(0)) - (y < Scalar(0)); + const Scalar xabs = x * xsign; + const Scalar yabs = y * ysign; + if (xabs > yabs) + { + // In this case xabs != 0 + const Scalar ratio = yabs / xabs; // so that 0 <= ratio < 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + c = xsign / common; + r = xabs * common; + s = -y / r; + } + else + { + if (yabs == Scalar(0)) + { + r = Scalar(0); + c = Scalar(1); + s = Scalar(0); + return; + } + const Scalar ratio = xabs / yabs; // so that 0 <= ratio <= 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + s = -ysign / common; + r = yabs * common; + c = x / r; + } + } + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + UpperHessenbergQR(Index size) : + m_n(size), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. + /// + UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), + m_shift(shift), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + { + compute(mat, shift); + } + + /// + /// Virtual destructor. + /// + virtual ~UpperHessenbergQR(){}; + + /// + /// Conduct the QR factorization of an upper Hessenberg matrix with + /// an optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. 
+ /// + virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("UpperHessenbergQR: matrix must be square"); + + m_shift = shift; + m_mat_T.resize(m_n, m_n); + m_rot_cos.resize(m_n - 1); + m_rot_sin.resize(m_n - 1); + + // Make a copy of mat - s * I + std::copy(mat.data(), mat.data() + mat.size(), m_mat_T.data()); + m_mat_T.diagonal().array() -= m_shift; + + Scalar xi, xj, r, c, s; + Scalar *Tii, *ptr; + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + Tii = &m_mat_T.coeffRef(i, i); + + // Make sure mat_T is upper Hessenberg + // Zero the elements below mat_T(i + 1, i) + std::fill(Tii + 2, Tii + m_n - i, Scalar(0)); + + xi = Tii[0]; // mat_T(i, i) + xj = Tii[1]; // mat_T(i + 1, i) + compute_rotation(xi, xj, r, c, s); + m_rot_cos[i] = c; + m_rot_sin[i] = s; + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(n - 1)] = G' * T[i:(i + 1), i:(n - 1)] + + // Gt << c, -s, s, c; + // m_mat_T.block(i, i, 2, m_n - i) = Gt * m_mat_T.block(i, i, 2, m_n - i); + Tii[0] = r; // m_mat_T(i, i) => r + Tii[1] = 0; // m_mat_T(i + 1, i) => 0 + ptr = Tii + m_n; // m_mat_T(i, k), k = i+1, i+2, ..., n-1 + for (Index j = i + 1; j < m_n; j++, ptr += m_n) + { + Scalar tmp = ptr[0]; + ptr[0] = c * tmp - s * ptr[1]; + ptr[1] = s * tmp + c * ptr[1]; + } + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(n - 1)] + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) *= c; + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) += s * mat_T.block(i, i + 1, 1, m_n - i - 1); + } + + m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + virtual Matrix matrix_R() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + return m_mat_T; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
+ /// + virtual void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(m_n, m_n); + std::copy(m_mat_T.data(), m_mat_T.data() + m_mat_T.size(), dest.data()); + + // Compute the RQ matrix + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Scalar *Yi, *Yi1; + Yi = &dest.coeffRef(0, i); + Yi1 = Yi + m_n; // RQ(0, i + 1) + const Index i2 = i + 2; + for (Index j = 0; j < i2; j++) + { + const Scalar tmp = Yi[j]; + Yi[j] = c * tmp - s * Yi1[j]; + Yi1[j] = s * tmp + c * Yi1[j]; + } + + /* Vector dest = RQ.block(0, i, i + 2, 1); + dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1); + dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */ + } + + // Add the shift to the diagonal + dest.diagonal().array() += m_shift; + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$. + /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp + s * Y[i + 1]; + Y[i + 1] = -s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$. + /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y + void apply_QtY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi' * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp - s * Y[i + 1]; + Y[i + 1] = s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi + s * Yi1; + Y.row(i + 1) = -s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. 
+ /// + /// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y + void apply_QtY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi - s * Yi1; + Y.row(i + 1) = s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ = Y * G1 * G2 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + /*Vector Yi(Y.rows()); + for(Index i = 0; i < m_n - 1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi - s * Y.col(i + 1); + Y.col(i + 1) = s * Yi + c * Y.col(i + 1); + }*/ + Scalar *Y_col_i, *Y_col_i1; + const Index n1 = m_n - 1; + const Index nrow = Y.rows(); + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + + Y_col_i = &Y.coeffRef(0, i); + Y_col_i1 = &Y.coeffRef(0, i + 1); + for (Index j = 0; j < nrow; j++) + { + Scalar tmp = Y_col_i[j]; + Y_col_i[j] = c * tmp - s * Y_col_i1[j]; + Y_col_i1[j] = s * tmp + c * Y_col_i1[j]; + } + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1' + void apply_YQt(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + Vector Yi(Y.rows()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi' + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi + s * Y.col(i + 1); + Y.col(i + 1) = -s * Yi + c * Y.col(i + 1); + } + } +}; + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of a tridiagonal matrix, a special +/// case of upper Hessenberg matrices. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. 
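+///
+/// A minimal usage sketch (illustrative), assuming a symmetric tridiagonal
+/// `Eigen::MatrixXd T` and a real shift `s`:
+/// \code
+/// Spectra::TridiagQR<double> decomp(T, s);  // T - s * I = Q * R
+/// Eigen::MatrixXd R = decomp.matrix_R();    // R has at most three nonzero diagonals
+/// Eigen::MatrixXd TQ;
+/// decomp.matrix_QtHQ(TQ);                   // TQ = R * Q + s * I, again tridiagonal
+/// \endcode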
+/// +template +class TridiagQR : public UpperHessenbergQR +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef typename Matrix::Index Index; + + Vector m_T_diag; // diagonal elements of T + Vector m_T_lsub; // lower subdiagonal of T + Vector m_T_usub; // upper subdiagonal of T + Vector m_T_usub2; // 2nd upper subdiagonal of T + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + TridiagQR(Index size) : + UpperHessenbergQR(size) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + UpperHessenbergQR(mat.rows()) + { + this->compute(mat, shift); + } + + /// + /// Conduct the QR factorization of a tridiagonal matrix with an + /// optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + this->m_n = mat.rows(); + if (this->m_n != mat.cols()) + throw std::invalid_argument("TridiagQR: matrix must be square"); + + this->m_shift = shift; + m_T_diag.resize(this->m_n); + m_T_lsub.resize(this->m_n - 1); + m_T_usub.resize(this->m_n - 1); + m_T_usub2.resize(this->m_n - 2); + this->m_rot_cos.resize(this->m_n - 1); + this->m_rot_sin.resize(this->m_n - 1); + + m_T_diag.array() = mat.diagonal().array() - this->m_shift; + m_T_lsub.noalias() = mat.diagonal(-1); + m_T_usub.noalias() = m_T_lsub; + + // A number of pointers to avoid repeated address calculation + Scalar *c = this->m_rot_cos.data(), // pointer to the cosine vector + *s = this->m_rot_sin.data(), // pointer to the sine vector + r; + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + // diag[i] == T[i, i] + // lsub[i] == T[i + 1, i] + // r = sqrt(T[i, i]^2 + T[i + 1, i]^2) + // c = T[i, i] / r, s = -T[i + 1, i] / r + this->compute_rotation(m_T_diag.coeff(i), m_T_lsub.coeff(i), r, *c, *s); + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(i + 2)] = G' * T[i:(i + 1), i:(i + 2)] + + // Update T[i, i] and T[i + 1, i] + // The updated value of T[i, i] is known to be r + // The updated value of T[i + 1, i] is known to be 0 + m_T_diag.coeffRef(i) = r; + m_T_lsub.coeffRef(i) = Scalar(0); + // Update T[i, i + 1] and T[i + 1, i + 1] + // usub[i] == T[i, i + 1] + // diag[i + 1] == T[i + 1, i + 1] + const Scalar tmp = m_T_usub.coeff(i); + m_T_usub.coeffRef(i) = (*c) * tmp - (*s) * m_T_diag.coeff(i + 1); + m_T_diag.coeffRef(i + 1) = (*s) * tmp + (*c) * m_T_diag.coeff(i + 1); + // Update T[i, i + 2] and T[i + 1, i + 2] + // usub2[i] == T[i, i + 2] + // usub[i + 1] == T[i + 1, i + 2] + if (i < n1 - 1) + { + m_T_usub2.coeffRef(i) = -(*s) * m_T_usub.coeff(i + 1); + m_T_usub.coeffRef(i + 1) *= 
(*c); + } + + c++; + s++; + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(i + 2)] + // T[i + 1, i + 1] = c * T[i + 1, i + 1] + s * T[i, i + 1]; + // T[i + 1, i + 2] *= c; + } + + this->m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + Matrix matrix_R() const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + Matrix R = Matrix::Zero(this->m_n, this->m_n); + R.diagonal().noalias() = m_T_diag; + R.diagonal(1).noalias() = m_T_usub; + R.diagonal(2).noalias() = m_T_usub2; + + return R; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is a tridiagonal matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. + /// + void matrix_QtHQ(Matrix& dest) const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(this->m_n, this->m_n); + dest.setZero(); + dest.diagonal().noalias() = m_T_diag; + // The upper diagonal refers to m_T_usub + // The 2nd upper subdiagonal will be zero in RQ + + // Compute the RQ matrix + // [m11 m12] points to RQ[i:(i+1), i:(i+1)] + // [0 m22] + // + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = this->m_rot_cos.coeff(i); + const Scalar s = this->m_rot_sin.coeff(i); + const Scalar m11 = dest.coeff(i, i), + m12 = m_T_usub.coeff(i), + m22 = m_T_diag.coeff(i + 1); + + // Update the diagonal and the lower subdiagonal of dest + dest.coeffRef(i, i) = c * m11 - s * m12; + dest.coeffRef(i + 1, i) = -s * m22; + dest.coeffRef(i + 1, i + 1) = c * m22; + } + + // Copy the lower subdiagonal to upper subdiagonal + dest.diagonal(1).noalias() = dest.diagonal(-1); + + // Add the shift to the diagonal + dest.diagonal().array() += this->m_shift; + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_QR_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h new file mode 100644 index 000000000..c41cae729 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h @@ -0,0 +1,108 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_CHOLESKY_H +#define DENSE_CHOLESKY_H + +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. 
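+///
+/// A minimal usage sketch (illustrative), assuming a positive definite `Eigen::MatrixXd B`:
+/// \code
+/// Spectra::DenseCholesky<double> Bop(B);                             // B = L * L'
+/// if (Bop.info() == Spectra::SUCCESSFUL)
+/// {
+///     Eigen::VectorXd x = Eigen::VectorXd::Random(B.rows()), y(B.rows()), z(B.rows());
+///     Bop.lower_triangular_solve(x.data(), y.data());                // y = inv(L) * x
+///     Bop.upper_triangular_solve(y.data(), z.data());                // z = inv(L') * y = inv(B) * x
+/// }
+/// \endcode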
+/// +template +class DenseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + const Index m_n; + Eigen::LLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseCholesky(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_info(NOT_COMPUTED) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? + SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixL().solve(x); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h new file mode 100644 index 000000000..02ad32f8f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h @@ -0,0 +1,102 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_COMPLEX_SHIFT_SOLVE_H +#define DENSE_GEN_COMPLEX_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the complex shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued +/// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the +/// GenEigsComplexShiftSolver eigen solver. 
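+///
+/// A minimal usage sketch (illustrative), assuming a square `Eigen::MatrixXd A`:
+/// \code
+/// Spectra::DenseGenComplexShiftSolve<double> op(A);
+/// op.set_shift(2.0, 0.5);                           // sigma = 2.0 + 0.5i
+/// Eigen::VectorXd x = Eigen::VectorXd::Random(A.rows()), y(A.rows());
+/// op.perform_op(x.data(), y.data());                // y = Re( inv(A - sigma * I) * x )
+/// \endcode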
+/// +template +class DenseGenComplexShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::PartialPivLU ComplexSolver; + + ConstGenericMatrix m_mat; + const Index m_n; + ComplexSolver m_solver; + ComplexVector m_x_cache; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenComplexShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the complex shift \f$\sigma\f$. + /// + /// \param sigmar Real part of \f$\sigma\f$. + /// \param sigmai Imaginary part of \f$\sigma\f$. + /// + void set_shift(Scalar sigmar, Scalar sigmai) + { + m_solver.compute(m_mat.template cast() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n)); + m_x_cache.resize(m_n); + m_x_cache.setZero(); + } + + /// + /// Perform the complex shift-solve operation + /// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = Re( inv(A - sigma * I) * x_in ) + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_x_cache.real() = MapConstVec(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(m_x_cache).real(); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_COMPLEX_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h new file mode 100644 index 000000000..c4ade80a3 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h @@ -0,0 +1,80 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_MAT_PROD_H +#define DENSE_GEN_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \defgroup MatOp Matrix Operations +/// +/// Define matrix operations on existing matrix objects +/// + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and +/// SymEigsSolver eigen solvers. +/// +template +class DenseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. 
`Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h new file mode 100644 index 000000000..9c93ecc7a --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h @@ -0,0 +1,88 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_REAL_SHIFT_SOLVE_H +#define DENSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class DenseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const Index m_n; + Eigen::PartialPivLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenRealShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n)); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h new file mode 100644 index 000000000..76c792686 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_SYM_MAT_PROD_H +#define DENSE_SYM_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. +/// +template +class DenseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseSymMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h new file mode 100644 index 000000000..620aa9413 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h @@ -0,0 +1,92 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_SYM_SHIFT_SOLVE_H +#define DENSE_SYM_SHIFT_SOLVE_H + +#include +#include + +#include "../LinAlg/BKLDLT.h" +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. 
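+///
+/// A minimal usage sketch (illustrative), assuming a symmetric `Eigen::MatrixXd A`:
+/// \code
+/// Spectra::DenseSymShiftSolve<double> op(A);
+/// op.set_shift(1.0);                                // factorize A - 1.0 * I
+/// Eigen::VectorXd x = Eigen::VectorXd::Random(A.rows()), y(A.rows());
+/// op.perform_op(x.data(), y.data());                // y = inv(A - 1.0 * I) * x
+/// \endcode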
+/// +template +class DenseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_n; + BKLDLT m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseSymShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat, Uplo, sigma); + if (m_solver.info() != SUCCESSFUL) + throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h new file mode 100644 index 000000000..a73efc389 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h @@ -0,0 +1,109 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_CHOLESKY_H +#define SPARSE_CHOLESKY_H + +#include +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. +/// +template +class SparseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + const Index m_n; + Eigen::SimplicialLLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseCholesky(ConstGenericSparseMatrix& mat) : + m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? 
+ SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.permutationP() * x; + m_decomp.matrixL().solveInPlace(y); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + y = m_decomp.permutationPinv() * y; + } +}; + +} // namespace Spectra + +#endif // SPARSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h new file mode 100644 index 000000000..0cc1f6674 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h @@ -0,0 +1,74 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_MAT_PROD_H +#define SPARSE_GEN_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver +/// eigen solvers. +/// +template +class SparseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h new file mode 100644 index 000000000..fc6e3c508 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h @@ -0,0 +1,93 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_REAL_SHIFT_SOLVE_H +#define SPARSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class SparseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenRealShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + SparseMatrix I(m_n, m_n); + I.setIdentity(); + + m_solver.compute(m_mat - sigma * I); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h new file mode 100644 index 000000000..3abd0c177 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h @@ -0,0 +1,100 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
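// ---------------------------------------------------------------------------
// Aside (not part of the patch): a minimal stand-alone sketch of what the
// SparseGenRealShiftSolve wrapper defined above computes. After set_shift(sigma)
// factorizes A - sigma*I with SparseLU, perform_op() returns
// y = (A - sigma*I)^{-1} x. The matrix and shift below are arbitrary, and the
// include path is an assumption that depends on how these headers are exposed.
// ---------------------------------------------------------------------------
// #include <Eigen/SparseCore>
// #include <iostream>
// #include <Spectra/MatOp/SparseGenRealShiftSolve.h>  // path assumed
//
// int main()
// {
//     const int n = 5;
//     Eigen::SparseMatrix<double> A(n, n);
//     for (int i = 0; i < n; i++)
//         A.insert(i, i) = i + 1.0;               // A = diag(1, 2, 3, 4, 5)
//     A.makeCompressed();
//
//     Spectra::SparseGenRealShiftSolve<double> op(A);
//     op.set_shift(0.5);                           // factorizes A - 0.5*I once
//
//     Eigen::VectorXd x = Eigen::VectorXd::Ones(n);
//     Eigen::VectorXd y(n);
//     op.perform_op(x.data(), y.data());           // y = (A - 0.5*I)^{-1} * x
//
//     std::cout << y.transpose() << std::endl;     // y[i] == 1 / (i + 0.5)
//     return 0;
// }
// ---------------------------------------------------------------------------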
+ +#ifndef SPARSE_REGULAR_INVERSE_H +#define SPARSE_REGULAR_INVERSE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines matrix operations required by the generalized eigen solver +/// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$, +/// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation +/// solving operation \f$y=B^{-1}x\f$. +/// +/// This class is intended to be used with the SymGEigsSolver generalized eigen solver +/// in the regular inverse mode. +/// +template +class SparseRegularInverse +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::ConjugateGradient m_cg; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseRegularInverse(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseRegularInverse: matrix must be square"); + + m_cg.compute(mat); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Perform the solving operation \f$y=B^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(B) * x_in + void solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_cg.solve(x); + } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Bx\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = B * x_in + void mat_prod(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_REGULAR_INVERSE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h new file mode 100644 index 000000000..2dd8799eb --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_MAT_PROD_H +#define SPARSE_SYM_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. 
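///
/// An illustrative sketch (added for this import, not part of the upstream
/// documentation) of pairing this wrapper with SymEigsSolver; the matrix below
/// is an arbitrary symmetric tridiagonal example and the include paths depend
/// on how the headers are installed.
///
/// \code{.cpp}
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymEigsSolver.h>
/// #include <Spectra/MatOp/SparseSymMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
///     const int n = 100;
///     Eigen::SparseMatrix<double> M(n, n);
///     M.reserve(Eigen::VectorXi::Constant(n, 3));
///     for (int i = 0; i < n; i++)
///     {
///         M.insert(i, i) = 2.0;
///         if (i > 0)     M.insert(i, i - 1) = 1.0;
///         if (i < n - 1) M.insert(i, i + 1) = 1.0;
///     }
///     M.makeCompressed();
///
///     // Matrix-vector product wrapper for the sparse symmetric matrix
///     SparseSymMatProd<double> op(M);
///
///     // Request the three largest eigenvalues, with ncv = 6
///     SymEigsSolver<double, LARGEST_ALGE, SparseSymMatProd<double> > eigs(&op, 3, 6);
///
///     eigs.init();
///     eigs.compute();
///     if (eigs.info() == SUCCESSFUL)
///         std::cout << eigs.eigenvalues().transpose() << std::endl;
///     return 0;
/// }
/// \endcode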
+/// +template +class SparseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h new file mode 100644 index 000000000..674c6ac52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h @@ -0,0 +1,95 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_SHIFT_SOLVE_H +#define SPARSE_SYM_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. +/// +template +class SparseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. 
+ /// + void set_shift(Scalar sigma) + { + SparseMatrix mat = m_mat.template selfadjointView(); + SparseMatrix identity(m_n, m_n); + identity.setIdentity(); + mat = mat - sigma * identity; + m_solver.isSymmetric(true); + m_solver.compute(mat); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h new file mode 100644 index 000000000..68654aafd --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h @@ -0,0 +1,150 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ARNOLDI_OP_H +#define ARNOLDI_OP_H + +#include +#include // std::sqrt + +namespace Spectra { + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup Operators Operators +/// +/// Different types of operators. +/// + +/// +/// \ingroup Operators +/// +/// Operators used in the Arnoldi factorization. +/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; + +public: + ArnoldiOp(OpType* op, BOpType* Bop) : + m_op(*op), m_Bop(*Bop), m_cache(op->rows()) + {} + + inline Index rows() const { return m_op.rows(); } + + // In generalized eigenvalue problem Ax=lambda*Bx, define the inner product to be = x'By. + // For regular eigenvalue problems, it is the usual inner product = x'y + + // Compute = x'By + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + return x.dot(m_cache); + } + + // Compute res = = X'By + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + res.noalias() = x.transpose() * m_cache; + } + + // B-norm of a vector, ||x||_B = sqrt(x'Bx) + template + Scalar norm(const Arg& x) + { + using std::sqrt; + return sqrt(inner_product(x, x)); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// \ingroup Operators +/// +/// Placeholder for the B-operator when \f$B = I\f$. +/// +class IdentityBOp +{}; + +/// +/// \ingroup Operators +/// +/// Partial specialization for the case \f$B = I\f$. 
+/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + +public: + ArnoldiOp(OpType* op, IdentityBOp* /*Bop*/) : + m_op(*op) + {} + + inline Index rows() const { return m_op.rows(); } + + // Compute = x'y + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) const + { + return x.dot(y); + } + + // Compute res = = X'y + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) const + { + res.noalias() = x.transpose() * y; + } + + // B-norm of a vector. For regular eigenvalue problems it is simply the L2 norm + template + Scalar norm(const Arg& x) + { + return x.norm(); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // ARNOLDI_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h new file mode 100644 index 000000000..fa9958352 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h @@ -0,0 +1,75 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_CHOLESKY_OP_H +#define SYM_GEIGS_CHOLESKY_OP_H + +#include +#include "../DenseSymMatProd.h" +#include "../DenseCholesky.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any +/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$. +/// This class is intended for internal use. +/// +template , + typename BOpType = DenseCholesky > +class SymGEigsCholeskyOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. + /// + SymGEigsCholeskyOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = inv(L) * A * inv(L') * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_Bop.upper_triangular_solve(x_in, y_out); + m_op.perform_op(y_out, m_cache.data()); + m_Bop.lower_triangular_solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_CHOLESKY_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h new file mode 100644 index 000000000..514269c7f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h @@ -0,0 +1,72 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_REG_INV_OP_H +#define SYM_GEIGS_REG_INV_OP_H + +#include +#include "../SparseSymMatProd.h" +#include "../SparseRegularInverse.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// regular inverse mode. This class is intended for internal use. +/// +template , + typename BOpType = SparseRegularInverse > +class SymGEigsRegInvOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. + /// + SymGEigsRegInvOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=B^{-1}Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(B) * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, m_cache.data()); + m_Bop.solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_REG_INV_OP_H diff --git a/gtsam/3rdparty/Spectra/SymEigsBase.h b/gtsam/3rdparty/Spectra/SymEigsBase.h new file mode 100644 index 000000000..9601425d5 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsBase.h @@ -0,0 +1,448 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_BASE_H +#define SYM_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/TridiagEigen.h" +#include "LinAlg/Lanczos.h" + +namespace Spectra { + +/// +/// \defgroup EigenSolver Eigen Solvers +/// +/// Eigen solvers for different types of problems. 
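// ---------------------------------------------------------------------------
// Aside (illustration only, not part of the patch): a dense Eigen check of the
// identity that SymGEigsCholeskyOp above relies on. If B = L*L', then
// C = inv(L) * A * inv(L') is symmetric and has the same eigenvalues as the
// pencil A*x = lambda*B*x, with eigenvectors related by x = inv(L')*y.
// The matrices below are random and only serve to demonstrate the agreement.
// ---------------------------------------------------------------------------
// #include <Eigen/Dense>
// #include <iostream>
//
// int main()
// {
//     const int n = 4;
//     Eigen::MatrixXd R = Eigen::MatrixXd::Random(n, n);
//     Eigen::MatrixXd A = R + R.transpose();                       // symmetric A
//     Eigen::MatrixXd B = R * R.transpose()
//                       + double(n) * Eigen::MatrixXd::Identity(n, n);  // SPD B
//
//     Eigen::LLT<Eigen::MatrixXd> llt(B);                          // B = L * L'
//     Eigen::MatrixXd L = llt.matrixL();
//     Eigen::MatrixXd Linv = L.inverse();                          // fine for a tiny demo
//     Eigen::MatrixXd C = Linv * A * Linv.transpose();             // standard-form matrix
//
//     Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> std_prob(C);
//     Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> gen_prob(A, B);
//
//     // Both rows print the same (ascending) eigenvalues
//     std::cout << std_prob.eigenvalues().transpose() << "\n"
//               << gen_prob.eigenvalues().transpose() << std::endl;
//     return 0;
// }
// ---------------------------------------------------------------------------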
+/// + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for symmetric eigen solvers, mainly for internal use. +/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as SymEigsSolver and SymEigsShiftSolver. +/// +template +class SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef ArnoldiOp ArnoldiOpType; + typedef Lanczos LanczosFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Lanczos method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + LanczosFac m_fac; // Lanczos factorization + Vector m_ritz_val; // Ritz values + +private: + Matrix m_ritz_vec; // Ritz vectors + Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Implicitly restarted Lanczos factorization + void restart(Index k) + { + if (k >= m_ncv) + return; + + TridiagQR decomp(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + // QR decomposition of H-mu*I, mu is the shift + decomp.compute(m_fac.matrix_H(), m_ritz_val[i]); + + // Q -> Q * Qi + decomp.apply_YQ(Q); + // H -> Q'HQ + // Since QR = H - mu * I, we have H = QR + mu * I + // and therefore Q'HQ = RQ + mu * I + m_fac.compress_H(decomp); + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // Returns the adjusted nev for restarting + Index nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 2) + nev_new = 2; + + if (nev_new > m_ncv - 1) + nev_new = m_ncv - 1; + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + TridiagEigen decomp(m_fac.matrix_H()); + const Vector& evals = decomp.eigenvalues(); + const Matrix& evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // For BOTH_ENDS, the eigenvalues 
are sorted according + // to the LARGEST_ALGE rule, so we need to move those smallest + // values to the left + // The order would be + // Largest => Smallest => 2nd largest => 2nd smallest => ... + // We keep this order since the first k values will always be + // the wanted collection, no matter k is nev_updated (used in restart()) + // or is nev (used in sort_ritzpair()) + if (SelectionRule == BOTH_ENDS) + { + std::vector ind_copy(ind); + for (Index i = 0; i < m_ncv; i++) + { + // If i is even, pick values from the left (large values) + // If i is odd, pick values from the right (small values) + if (i % 2 == 0) + ind[i] = ind_copy[i / 2]; + else + ind[i] = ind_copy[m_ncv - 1 - i / 2]; + } + } + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_ALGE: + break; + case LARGEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_ALGE: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + Vector new_ritz_val(m_ncv); + Matrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + + m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + +public: + /// \cond + + SymEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 1) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix"); + + if (ncv <= nev || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~SymEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. 
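    ///
    /// For instance (an illustrative sketch, not part of the upstream
    /// documentation), a deterministic all-ones starting vector could be
    /// supplied as follows, where `eigs` is a solver instance and `op` its
    /// matrix operation object:
    /// \code{.cpp}
    /// Eigen::VectorXd v0 = Eigen::VectorXd::Ones(op.rows());
    /// eigs.init(v0.data());
    /// \endcode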
+ /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Lanczos factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. + /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. + /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_ALGE`, `Spectra::LARGEST_MAGN`, + /// `Spectra::SMALLEST_ALGE` and `Spectra::SMALLEST_MAGN`, + /// for example `LARGEST_ALGE` indicates that largest eigenvalues + /// come first. Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of SymEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_ALGE) + { + // The m-step Lanczos factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. + /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + Vector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + Vector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
+ /// + virtual Matrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + Matrix res(m_n, nvec); + + if (!nvec) + return res; + + Matrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. + /// + virtual Matrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h new file mode 100644 index 000000000..e2b410cb1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h @@ -0,0 +1,203 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_SHIFT_SOLVER_H +#define SYM_EIGS_SHIFT_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices using +/// the **shift-and-invert mode**. The background information of the symmetric +/// eigen solver is documented in the SymEigsSolver class. Here we focus on +/// explaining the shift-and-invert mode. +/// +/// The shift-and-invert mode is based on the following fact: +/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of +/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$, +/// we have +/// \f[(A-\sigma I)^{-1}x=\nu x\f] +/// where +/// \f[\nu=\frac{1}{\lambda-\sigma}\f] +/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix +/// \f$(A-\sigma I)^{-1}\f$. +/// +/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$ +/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired +/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting +/// that \f$\lambda=\sigma+\nu^{-1}\f$. +/// +/// The reason why we need this type of manipulation is that +/// the algorithm of **Spectra** (and also **ARPACK**) +/// is good at finding eigenvalues with large magnitude, but may fail in looking +/// for eigenvalues that are close to zero. However, if we really need them, we +/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then +/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$ +/// implies smallest values of \f$\lambda\f$. +/// +/// To summarize, in the shift-and-invert mode, the selection rule will apply to +/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule +/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of +/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues() +/// method will always return the eigenvalues in the original problem (i.e., +/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the +/// same for both the original problem and the shifted-and-inverted problem. +/// +/// \tparam Scalar The element type of the matrix. 
+/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymShiftSolve and +/// SparseSymShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseSymShiftSolve. +/// +/// Below is an example that illustrates the use of the shift-and-invert mode: +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // A size-10 diagonal matrix with elements 1, 2, ..., 10 +/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10); +/// for(int i = 0; i < M.rows(); i++) +/// M(i, i) = i + 1; +/// +/// // Construct matrix operation object using the wrapper class +/// DenseSymShiftSolve op(M); +/// +/// // Construct eigen solver object with shift 0 +/// // This will find eigenvalues that are closest to 0 +/// SymEigsShiftSolver< double, LARGEST_MAGN, +/// DenseSymShiftSolve > eigs(&op, 3, 6, 0.0); +/// +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (3.0, 2.0, 1.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +/// Also an example for user-supplied matrix shift-solve operation class: +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTenShiftSolve +/// { +/// private: +/// double sigma_; +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// void set_shift(double sigma) { sigma_ = sigma; } +/// // y_out = inv(A - sigma * I) * x_in +/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...) +/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] / (i + 1 - sigma_); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTenShiftSolve op; +/// // Find three eigenvalues that are closest to 3.14 +/// SymEigsShiftSolver eigs(&op, 3, 6, 3.14); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (4.0, 3.0, 2.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsShiftSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Array Array; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + Array m_ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = m_ritz_val_org; + SymEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. 
Users could either + /// create the object from the wrapper class such as DenseSymShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseSymShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv_` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// \param sigma The value of the shift. + /// + SymEigsShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + SymEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymEigsSolver.h b/gtsam/3rdparty/Spectra/SymEigsSolver.h new file mode 100644 index 000000000..404df1e52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsSolver.h @@ -0,0 +1,171 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_SOLVER_H +#define SYM_EIGS_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric. +/// +/// **Spectra** is designed to calculate a specified number (\f$k\f$) +/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much +/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues +/// and eigenvectors are computed. +/// +/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires +/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of +/// this solver need to supply a class that computes the result of \f$Av\f$ +/// for any given vector \f$v\f$. The name of this class should be given to +/// the template parameter `OpType`, and instance of this class passed to +/// the constructor of SymEigsSolver. +/// +/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**, +/// for example `Eigen::MatrixXd`, then there is an easy way to construct such +/// matrix operation class, by using the built-in wrapper class DenseSymMatProd +/// which wraps an existing matrix object in **Eigen**. This is also the +/// default template parameter for SymEigsSolver. For sparse matrices, the +/// wrapper class SparseSymMatProd can be used similarly. +/// +/// If the users need to define their own matrix-vector multiplication operation +/// class, it should implement all the public member functions as in DenseSymMatProd. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. 
+/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// +/// Below is an example that demonstrates the usage of this class. +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to calculate the eigenvalues of M +/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10); +/// Eigen::MatrixXd M = A + A.transpose(); +/// +/// // Construct matrix operation object using the wrapper class DenseSymMatProd +/// DenseSymMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest three eigenvalues +/// SymEigsSolver< double, LARGEST_ALGE, DenseSymMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +/// +/// And here is an example for user-supplied matrix operation class. +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTen +/// { +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// // y_out = M * x_in +/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] * (i + 1); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTen op; +/// SymEigsSolver eigs(&op, 3, 6); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (10, 9, 8) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. 
+ /// + SymEigsSolver(OpType* op, Index nev, Index ncv) : + SymEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymGEigsSolver.h b/gtsam/3rdparty/Spectra/SymGEigsSolver.h new file mode 100644 index 000000000..68aa37cfc --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymGEigsSolver.h @@ -0,0 +1,326 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_SOLVER_H +#define SYM_GEIGS_SOLVER_H + +#include "SymEigsBase.h" +#include "Util/GEigsMode.h" +#include "MatOp/internal/SymGEigsCholeskyOp.h" +#include "MatOp/internal/SymGEigsRegInvOp.h" + +namespace Spectra { + +/// +/// \defgroup GEigenSolver Generalized Eigen Solvers +/// +/// Generalized eigen solvers for different types of problems. +/// + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and +/// \f$B\f$ is positive definite. +/// +/// There are two modes of this solver, specified by the template parameter +/// GEigsMode. See the pages for the specialized classes for details. +/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky +/// decomposition, which is the preferred mode when the decomposition is +/// available. (This can be easily done in Eigen using the dense or sparse +/// Cholesky solver.) +/// See \ref SymGEigsSolver "SymGEigsSolver (Cholesky mode)" for more details. +/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the +/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be +/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or +/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition. +/// See \ref SymGEigsSolver "SymGEigsSolver (Regular inverse mode)" for more details. + +// Empty class template +template +class SymGEigsSolver +{}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky +/// decomposition \f$B=LL'\f$. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower +/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$. +/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the DenseCholesky or SparseCholesky +/// classes. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. 
+/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either +/// use the wrapper classes such as DenseCholesky and +/// SparseCholesky, or define their +/// own that implements all the public member functions as in +/// DenseCholesky. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_CHOLESKY. +/// +/// Below is an example that demonstrates the usage of this class. +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x +/// const int n = 100; +/// +/// // Define the A matrix +/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n); +/// Eigen::MatrixXd A = M + M.transpose(); +/// +/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals +/// Eigen::SparseMatrix B(n, n); +/// B.reserve(Eigen::VectorXi::Constant(n, 3)); +/// for(int i = 0; i < n; i++) +/// { +/// B.insert(i, i) = 2.0; +/// if(i > 0) +/// B.insert(i - 1, i) = 1.0; +/// if(i < n - 1) +/// B.insert(i + 1, i) = 1.0; +/// } +/// +/// // Construct matrix operation object using the wrapper classes +/// DenseSymMatProd op(A); +/// SparseCholesky Bop(B); +/// +/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues +/// SymGEigsSolver, SparseCholesky, GEIGS_CHOLESKY> +/// geigs(&op, &Bop, 3, 6); +/// +/// // Initialize and compute +/// geigs.init(); +/// int nconv = geigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// Eigen::MatrixXd evecs; +/// if(geigs.info() == SUCCESSFUL) +/// { +/// evalues = geigs.eigenvalues(); +/// evecs = geigs.eigenvectors(); +/// } +/// +/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl; +/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl; +/// +/// // Verify results using the generalized eigen solver in Eigen +/// Eigen::MatrixXd Bdense = B; +/// Eigen::GeneralizedSelfAdjointEigenSolver es(A, Bdense); +/// +/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl; +/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl; +/// +/// return 0; +/// } +/// \endcode + +// Partial specialization for GEigsMode = GEIGS_CHOLESKY +template +class SymGEigsSolver : + public SymEigsBase, IdentityBOp> +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + BOpType* m_Bop; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. 
+ /// \param Bop Pointer to the \f$B\f$ matrix operation object. It + /// represents a Cholesky decomposition of \f$B\f$, and should + /// implement the lower and upper triangular solving operations: + /// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector + /// \f$v\f$, where \f$LL'=B\f$. Users could either + /// create the object from the wrapper classes such as DenseCholesky, or + /// define their own that implements all the public member functions + /// as in DenseCholesky. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, IdentityBOp>( + new SymGEigsCholeskyOp(*op, *Bop), NULL, nev, ncv), + m_Bop(Bop) + {} + + /// \cond + + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsCholeskyOp object + delete this->m_op; + } + + Matrix eigenvectors(Index nvec) const + { + Matrix res = SymEigsBase, IdentityBOp>::eigenvectors(nvec); + Vector tmp(res.rows()); + const Index nconv = res.cols(); + for (Index i = 0; i < nconv; i++) + { + m_Bop->upper_triangular_solve(&res(0, i), tmp.data()); + res.col(i).noalias() = tmp; + } + + return res; + } + + Matrix eigenvectors() const + { + return SymGEigsSolver::eigenvectors(this->m_nev); + } + + /// \endcond +}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations +/// defined below. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the +/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$. +/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the SparseRegularInverse class. There is no +/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode +/// is always preferred. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. 
Users could either +/// use the wrapper class SparseRegularInverse, or define their +/// own that implements all the public member functions as in +/// SparseRegularInverse. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_REGULAR_INVERSE. +/// + +// Partial specialization for GEigsMode = GEIGS_REGULAR_INVERSE +template +class SymGEigsSolver : + public SymEigsBase, BOpType> +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. It should + /// implement the multiplication operation \f$Bv\f$ and the linear equation + /// solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class SparseRegularInverse, or + /// define their own that implements all the public member functions + /// as in SparseRegularInverse. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, BOpType>( + new SymGEigsRegInvOp(*op, *Bop), Bop, nev, ncv) + {} + + /// \cond + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsRegInvOp object + delete this->m_op; + } + /// \endcond +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/Util/CompInfo.h b/gtsam/3rdparty/Spectra/Util/CompInfo.h new file mode 100644 index 000000000..07b8399a1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/CompInfo.h @@ -0,0 +1,35 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef COMP_INFO_H +#define COMP_INFO_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to report the status of computation. +/// +enum COMPUTATION_INFO +{ + SUCCESSFUL = 0, ///< Computation was successful. + + NOT_COMPUTED, ///< Used in eigen solvers, indicating that computation + ///< has not been conducted. Users should call + ///< the `compute()` member function of solvers. + + NOT_CONVERGING, ///< Used in eigen solvers, indicating that some eigenvalues + ///< did not converge. The `compute()` + ///< function returns the number of converged eigenvalues. + + NUMERICAL_ISSUE ///< Used in Cholesky decomposition, indicating that the + ///< matrix is not positive definite. 
+}; + +} // namespace Spectra + +#endif // COMP_INFO_H diff --git a/gtsam/3rdparty/Spectra/Util/GEigsMode.h b/gtsam/3rdparty/Spectra/Util/GEigsMode.h new file mode 100644 index 000000000..a547ac0bf --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/GEigsMode.h @@ -0,0 +1,32 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEIGS_MODE_H +#define GEIGS_MODE_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to specify the mode of generalized eigenvalue solver. +/// +enum GEIGS_MODE +{ + GEIGS_CHOLESKY = 0, ///< Using Cholesky decomposition to solve generalized eigenvalues. + + GEIGS_REGULAR_INVERSE, ///< Regular inverse mode for generalized eigenvalue solver. + + GEIGS_SHIFT_INVERT, ///< Shift-and-invert mode for generalized eigenvalue solver. + + GEIGS_BUCKLING, ///< Buckling mode for generalized eigenvalue solver. + + GEIGS_CAYLEY ///< Cayley transformation mode for generalized eigenvalue solver. +}; + +} // namespace Spectra + +#endif // GEIGS_MODE_H diff --git a/gtsam/3rdparty/Spectra/Util/SelectionRule.h b/gtsam/3rdparty/Spectra/Util/SelectionRule.h new file mode 100644 index 000000000..237950b4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SelectionRule.h @@ -0,0 +1,275 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SELECTION_RULE_H +#define SELECTION_RULE_H + +#include // std::vector +#include // std::abs +#include // std::sort +#include // std::complex +#include // std::pair +#include // std::invalid_argument + +namespace Spectra { + +/// +/// \defgroup Enumerations +/// +/// Enumeration types for the selection rule of eigenvalues. +/// + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. +/// +enum SELECT_EIGENVALUE +{ + LARGEST_MAGN = 0, ///< Select eigenvalues with largest magnitude. Magnitude + ///< means the absolute value for real numbers and norm for + ///< complex numbers. Applies to both symmetric and general + ///< eigen solvers. + + LARGEST_REAL, ///< Select eigenvalues with largest real part. Only for general eigen solvers. + + LARGEST_IMAG, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers. + + LARGEST_ALGE, ///< Select eigenvalues with largest algebraic value, considering + ///< any negative sign. Only for symmetric eigen solvers. + + SMALLEST_MAGN, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general + ///< eigen solvers. + + SMALLEST_REAL, ///< Select eigenvalues with smallest real part. Only for general eigen solvers. + + SMALLEST_IMAG, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers. + + SMALLEST_ALGE, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers. + + BOTH_ENDS ///< Select eigenvalues half from each end of the spectrum. When + ///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers. +}; + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. Alias for `SELECT_EIGENVALUE`. 
+/// +enum SELECT_EIGENVALUE_ALIAS +{ + WHICH_LM = 0, ///< Alias for `LARGEST_MAGN` + WHICH_LR, ///< Alias for `LARGEST_REAL` + WHICH_LI, ///< Alias for `LARGEST_IMAG` + WHICH_LA, ///< Alias for `LARGEST_ALGE` + WHICH_SM, ///< Alias for `SMALLEST_MAGN` + WHICH_SR, ///< Alias for `SMALLEST_REAL` + WHICH_SI, ///< Alias for `SMALLEST_IMAG` + WHICH_SA, ///< Alias for `SMALLEST_ALGE` + WHICH_BE ///< Alias for `BOTH_ENDS` +}; + +/// \cond + +// Get the element type of a "scalar" +// ElemType => double +// ElemType< std::complex > => double +template +class ElemType +{ +public: + typedef T type; +}; + +template +class ElemType > +{ +public: + typedef T type; +}; + +// When comparing eigenvalues, we first calculate the "target" +// to sort. For example, if we want to choose the eigenvalues with +// largest magnitude, the target will be -abs(x). +// The minus sign is due to the fact that std::sort() sorts in ascending order. + +// Default target: throw an exception +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + throw std::invalid_argument("incompatible selection rule"); + return -abs(val); + } +}; + +// Specialization for LARGEST_MAGN +// This covers [float, double, complex] x [LARGEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return -abs(val); + } +}; + +// Specialization for LARGEST_REAL +// This covers [complex] x [LARGEST_REAL] +template +class SortingTarget, LARGEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return -val.real(); + } +}; + +// Specialization for LARGEST_IMAG +// This covers [complex] x [LARGEST_IMAG] +template +class SortingTarget, LARGEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return -abs(val.imag()); + } +}; + +// Specialization for LARGEST_ALGE +// This covers [float, double] x [LARGEST_ALGE] +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Here BOTH_ENDS is the same as LARGEST_ALGE, but +// we need some additional steps, which are done in +// SymEigsSolver.h => retrieve_ritzpair(). +// There we move the smallest values to the proper locations. 
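// Worked example (comment added for clarity): with LARGEST_MAGN the sorting
// target of an eigenvalue x is -abs(x), so sorting the targets in ascending
// order with std::sort() puts the largest-magnitude eigenvalues first.
// For eigenvalues {2, -5, 3} the targets are {-2, -5, -3}; their ascending
// order (-5, -3, -2) yields the desired eigenvalue order (-5, 3, 2).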
+template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Specialization for SMALLEST_MAGN +// This covers [float, double, complex] x [SMALLEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return abs(val); + } +}; + +// Specialization for SMALLEST_REAL +// This covers [complex] x [SMALLEST_REAL] +template +class SortingTarget, SMALLEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return val.real(); + } +}; + +// Specialization for SMALLEST_IMAG +// This covers [complex] x [SMALLEST_IMAG] +template +class SortingTarget, SMALLEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return abs(val.imag()); + } +}; + +// Specialization for SMALLEST_ALGE +// This covers [float, double] x [SMALLEST_ALGE] +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return val; + } +}; + +// Sort eigenvalues and return the order index +template +class PairComparator +{ +public: + bool operator()(const PairType& v1, const PairType& v2) + { + return v1.first < v2.first; + } +}; + +template +class SortEigenvalue +{ +private: + typedef typename ElemType::type TargetType; // Type of the sorting target, will be + // a floating number type, e.g. "double" + typedef std::pair PairType; // Type of the sorting pair, including + // the sorting target and the index + + std::vector pair_sort; + +public: + SortEigenvalue(const T* start, int size) : + pair_sort(size) + { + for (int i = 0; i < size; i++) + { + pair_sort[i].first = SortingTarget::get(start[i]); + pair_sort[i].second = i; + } + PairComparator comp; + std::sort(pair_sort.begin(), pair_sort.end(), comp); + } + + std::vector index() + { + std::vector ind(pair_sort.size()); + for (unsigned int i = 0; i < ind.size(); i++) + ind[i] = pair_sort[i].second; + + return ind; + } +}; + +/// \endcond + +} // namespace Spectra + +#endif // SELECTION_RULE_H diff --git a/gtsam/3rdparty/Spectra/Util/SimpleRandom.h b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h new file mode 100644 index 000000000..83fa7c86f --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h @@ -0,0 +1,91 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SIMPLE_RANDOM_H +#define SIMPLE_RANDOM_H + +#include + +/// \cond + +namespace Spectra { + +// We need a simple pseudo random number generator here: +// 1. It is used to generate initial and restarted residual vector. +// 2. It is not necessary to be so "random" and advanced. All we hope +// is that the residual vector is not in the space spanned by the +// current Krylov space. This should be met almost surely. +// 3. We don't want to call RNG in C++, since we actually want the +// algorithm to be deterministic. Also, calling RNG in C/C++ is not +// allowed in R packages submitted to CRAN. +// 4. The method should be as simple as possible, so an LCG is enough. +// 5. 
Based on public domain code by Ray Gardner +// http://stjarnhimlen.se/snippets/rg_rand.c + +template +class SimpleRandom +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + const unsigned int m_a; // multiplier + const unsigned long m_max; // 2^31 - 1 + long m_rand; + + inline long next_long_rand(long seed) + { + unsigned long lo, hi; + + lo = m_a * (long) (seed & 0xFFFF); + hi = m_a * (long) ((unsigned long) seed >> 16); + lo += (hi & 0x7FFF) << 16; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + lo += hi >> 15; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + return (long) lo; + } + +public: + SimpleRandom(unsigned long init_seed) : + m_a(16807), + m_max(2147483647L), + m_rand(init_seed ? (init_seed & m_max) : 1) + {} + + Scalar random() + { + m_rand = next_long_rand(m_rand); + return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + + // Vector of random numbers of type Scalar + // Ranging from -0.5 to 0.5 + Vector random_vec(const Index len) + { + Vector res(len); + for (Index i = 0; i < len; i++) + { + m_rand = next_long_rand(m_rand); + res[i] = Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + return res; + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // SIMPLE_RANDOM_H diff --git a/gtsam/3rdparty/Spectra/Util/TypeTraits.h b/gtsam/3rdparty/Spectra/Util/TypeTraits.h new file mode 100644 index 000000000..29288c5a6 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/TypeTraits.h @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef TYPE_TRAITS_H +#define TYPE_TRAITS_H + +#include +#include + +/// \cond + +namespace Spectra { + +// For a real value type "Scalar", we want to know its smallest +// positive value, i.e., std::numeric_limits::min(). +// However, we must take non-standard value types into account, +// so we rely on Eigen::NumTraits. +// +// Eigen::NumTraits has defined epsilon() and lowest(), but +// lowest() means negative highest(), which is a very small +// negative value. +// +// Therefore, we manually define this limit, and use eplison()^3 +// to mimic it for non-standard types. 
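// Illustrative sketch (comment only, not part of the original header): the
// specializations below simply forward to std::numeric_limits for the standard
// floating point types, while any other scalar type falls back to epsilon()^3.
// For example, assuming a hypothetical multiprecision type `mpreal` that is
// registered with Eigen::NumTraits:
//
//   double d = TypeTraits<double>::min();  // == std::numeric_limits<double>::min()
//   mpreal m = TypeTraits<mpreal>::min();  // == pow(NumTraits<mpreal>::epsilon(), mpreal(3))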
+ +// Generic definition +template +struct TypeTraits +{ + static inline Scalar min() + { + return Eigen::numext::pow(Eigen::NumTraits::epsilon(), Scalar(3)); + } +}; + +// Full specialization +template <> +struct TypeTraits +{ + static inline float min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline double min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline long double min() + { + return std::numeric_limits::min(); + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // TYPE_TRAITS_H diff --git a/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h new file mode 100644 index 000000000..69c4d92c0 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h @@ -0,0 +1,552 @@ +// Written by Anna Araslanova +// Modified by Yixuan Qiu +// License: MIT + +#ifndef LOBPCG_SOLVER +#define LOBPCG_SOLVER + +#include +#include + +#include +#include +#include +#include +#include + +#include "../SymGEigsSolver.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// + +/// *** METHOD +/// The class represent the LOBPCG algorithm, which was invented by Andrew Knyazev +/// Theoretical background of the procedure can be found in the articles below +/// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver : Locally optimal block preconditioned conjugate gradient method.SIAM journal on scientific computing, 23(2), pp.517 - 541. +/// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. Block locally optimal preconditioned eigenvalue xolvers(BLOPEX) in HYPRE and PETSc.SIAM Journal on Scientific Computing, 29(5), pp.2224 - 2239. +/// +/// *** CONDITIONS OF USE +/// Locally Optimal Block Preconditioned Conjugate Gradient(LOBPCG) is a method for finding the M smallest eigenvalues +/// and eigenvectors of a large symmetric positive definite generalized eigenvalue problem +/// \f$Ax=\lambda Bx,\f$ +/// where \f$A_{NxN}\f$ is a symmetric matrix, \f$B\f$ is symmetric and positive - definite. 
\f$A and B\f$ are also assumed large and sparse +/// \f$\textit{M}\f$ should be \f$\<< textit{N}\f$ (at least \f$\textit{5M} < \textit{N} \f$) +/// +/// *** ARGUMENTS +/// Eigen::SparseMatrix A; // N*N - Ax = lambda*Bx, lrage and sparse +/// Eigen::SparseMatrix X; // N*M - initial approximations to eigenvectors (random in general case) +/// Spectra::LOBPCGSolver solver(A, X); +/// *Eigen::SparseMatrix B; // N*N - Ax = lambda*Bx, sparse, positive definite +/// solver.setConstraints(B); +/// *Eigen::SparseMatrix Y; // N*K - constraints, already found eigenvectors +/// solver.setB(B); +/// *Eigen::SparseMatrix T; // N*N - preconditioner ~ A^-1 +/// solver.setPreconditioner(T); +/// +/// *** OUTCOMES +/// solver.solve(); // compute eigenpairs // void +/// solver.info(); // state of converjance // int +/// solver.residuals(); // get residuals to evaluate biases // Eigen::Matrix +/// solver.eigenvalues(); // get eigenvalues // Eigen::Matrix +/// solver.eigenvectors(); // get eigenvectors // Eigen::Matrix +/// +/// *** EXAMPLE +/// \code{.cpp} +/// #include +/// +/// // random A +/// Matrix a; +/// a = (Matrix::Random(10, 10).array() > 0.6).cast() * Matrix::Random(10, 10).array() * 5; +/// a = Matrix((a).triangularView()) + Matrix((a).triangularView()).transpose(); +/// for (int i = 0; i < 10; i++) +/// a(i, i) = i + 0.5; +/// std::cout << a << "\n"; +/// Eigen::SparseMatrix A(a.sparseView()); +/// // random X +/// Eigen::Matrix x; +/// x = Matrix::Random(10, 2).array(); +/// Eigen::SparseMatrix X(x.sparseView()); +/// // solve Ax = lambda*x +/// Spectra::LOBPCGSolver solver(A, X); +/// solver.compute(10, 1e-4); // 10 iterations, L2_tolerance = 1e-4*N +/// std::cout << "info\n" << solver.info() << std::endl; +/// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl; +/// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl; +/// std::cout << "residuals\n" << solver.residuals() << std::endl; +/// \endcode +/// + +template +class LOBPCGSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::SparseMatrix SparseMatrix; + typedef Eigen::SparseMatrix SparseComplexMatrix; + + const int m_n; // dimension of matrix A + const int m_nev; // number of eigenvalues requested + SparseMatrix A, X; + SparseMatrix m_Y, m_B, m_preconditioner; + bool flag_with_constraints, flag_with_B, flag_with_preconditioner; + +public: + SparseMatrix m_residuals; + Matrix m_evectors; + Vector m_evalues; + int m_info; + +private: + // B-orthonormalize matrix M + int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B, + SparseMatrix& true_BM, bool has_true_BM = false) + { + SparseMatrix BM; + + if (has_true_BM == false) + { + if (flag_with_B) + { + BM = B * M; + } + else + { + BM = M; + } + } + else + { + BM = true_BM; + } + + Eigen::SimplicialLDLT chol_MBM(M.transpose() * BM); + + if (chol_MBM.info() != SUCCESSFUL) + { + // LDLT decomposition fail + m_info = chol_MBM.info(); + return chol_MBM.info(); + } + + SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast(); + ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast(); + + D_MBM_vec = D_MBM_vec.cwiseSqrt(); + + for (int i = 0; i < D_MBM_vec.rows(); i++) + { + D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i); + } + + SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal()); + + SparseComplexMatrix U_inv(Upper_MBM.rows(), Upper_MBM.cols()); + U_inv.setIdentity(); + 
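        // Comment added for clarity (the computation itself is unchanged): the
        // next statement back-substitutes U * U_inv = I, i.e. it forms U^{-1}
        // for the upper-triangular factor U of the LDLT decomposition of M'BM.
        // Multiplying M by U^{-1} * D^{-1/2} below makes its columns B-orthonormal.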
Upper_MBM.template triangularView().solveInPlace(U_inv); + + SparseComplexMatrix right_product = U_inv * D_MBM_mat; + M = M * right_product.real(); + if (flag_with_B) + { + true_BM = B * M; + } + else + { + true_BM = M; + } + + return SUCCESSFUL; + } + + void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y, + SparseMatrix& B) + { + SparseMatrix BY; + if (flag_with_B) + { + BY = B * Y; + } + else + { + BY = Y; + } + + SparseMatrix YBY = Y.transpose() * BY; + SparseMatrix BYX = BY.transpose() * X; + + SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView(); + X = X - Y * YBY_XYX; + } + + /* + return + 'AB + CD' + */ + Matrix stack_4_matricies(Matrix A, Matrix B, + Matrix C, Matrix D) + { + Matrix result(A.rows() + C.rows(), A.cols() + B.cols()); + result.topLeftCorner(A.rows(), A.cols()) = A; + result.topRightCorner(B.rows(), B.cols()) = B; + result.bottomLeftCorner(C.rows(), C.cols()) = C; + result.bottomRightCorner(D.rows(), D.cols()) = D; + return result; + } + + Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C, + Matrix D, Matrix E, Matrix F, + Matrix G, Matrix H, Matrix I) + { + Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols()); + result.block(0, 0, A.rows(), A.cols()) = A; + result.block(0, A.cols(), B.rows(), B.cols()) = B; + result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C; + result.block(A.rows(), 0, D.rows(), D.cols()) = D; + result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E; + result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F; + result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G; + result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H; + result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I; + + return result; + } + + void sort_epairs(Vector& evalues, Matrix& evectors, int SelectionRule) + { + std::function cmp; + if (SelectionRule == SMALLEST_ALGE) + cmp = std::less{}; + else + cmp = std::greater{}; + + std::map epairs(cmp); + for (int i = 0; i < m_evectors.cols(); ++i) + epairs.insert(std::make_pair(evalues(i), evectors.col(i))); + + int i = 0; + for (auto& epair : epairs) + { + evectors.col(i) = epair.second; + evalues(i) = epair.first; + i++; + } + } + + void removeColumns(SparseMatrix& matrix, std::vector& colToRemove) + { + // remove columns through matrix multiplication + SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size())); + int iCol = 0; + std::vector> tripletList; + tripletList.reserve(matrix.cols() - int(colToRemove.size())); + + for (int iRow = 0; iRow < matrix.cols(); iRow++) + { + if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end()) + { + tripletList.push_back(Eigen::Triplet(iRow, iCol, 1)); + iCol++; + } + } + + new_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + matrix = matrix * new_matrix; + } + + int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector& columnsToDelete) + { + // square roots from sum of squares by column + int BlockSize = m_nev; + Scalar sum, buffer; + + for (int iCol = 0; iCol < m_nev; iCol++) + { + sum = 0; + for (int iRow = 0; iRow < m_n; iRow++) + { + buffer = m_residuals.coeff(iRow, iCol); + sum += buffer * buffer; + } + + if (sqrt(sum) < tolerance_L2) + { + BlockSize--; + columnsToDelete.push_back(iCol); + } + } + return BlockSize; + } + +public: + LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) : + m_n(A.rows()), + m_nev(X.cols()), 
+ m_info(NOT_COMPUTED), + flag_with_constraints(false), + flag_with_B(false), + flag_with_preconditioner(false), + A(A), + X(X) + { + if (A.rows() != X.rows() || A.rows() != A.cols()) + throw std::invalid_argument("Wrong size"); + + //if (m_n < 5* m_nev) + // throw std::invalid_argument("The problem size is small compared to the block size. Use standard eigensolver"); + } + + void setConstraints(const SparseMatrix& Y) + { + m_Y = Y; + flag_with_constraints = true; + } + + void setB(const SparseMatrix& B) + { + if (B.rows() != A.rows() || B.cols() != A.cols()) + throw std::invalid_argument("Wrong size"); + m_B = B; + flag_with_B = true; + } + + void setPreconditioner(const SparseMatrix& preconditioner) + { + m_preconditioner = preconditioner; + flag_with_preconditioner = true; + } + + void compute(int maxit = 10, Scalar tol_div_n = 1e-7) + { + Scalar tolerance_L2 = tol_div_n * m_n; + int BlockSize; + int max_iter = std::min(m_n, maxit); + + SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix; + Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD; + std::vector columnsToDelete; + + if (flag_with_constraints) + { + // Apply the constraints Y to X + applyConstraintsInPlace(X, m_Y, m_B); + } + + // Make initial vectors orthonormal + // implicit BX declaration + if (orthogonalizeInPlace(X, m_B, BX) != SUCCESSFUL) + { + max_iter = 0; + } + + AX = A * X; + // Solve the following NxN eigenvalue problem for all N eigenvalues and -vectors: + // first approximation via a dense problem + Eigen::EigenSolver eigs(Matrix(X.transpose() * AX)); + + if (eigs.info() != SUCCESSFUL) + { + m_info = eigs.info(); + max_iter = 0; + } + else + { + m_evalues = eigs.eigenvalues().real(); + m_evectors = eigs.eigenvectors().real(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + sparse_eVecX = m_evectors.sparseView(); + + X = X * sparse_eVecX; + AX = AX * sparse_eVecX; + BX = BX * sparse_eVecX; + } + + for (int iter_num = 0; iter_num < max_iter; iter_num++) + { + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = SUCCESSFUL; + break; + } + + // substitution of the original active mask + if (columnsToDelete.size() > 0) + { + removeColumns(m_residuals, columnsToDelete); + if (iter_num > 0) + { + removeColumns(directions, columnsToDelete); + removeColumns(AD, columnsToDelete); + removeColumns(BD, columnsToDelete); + } + columnsToDelete.clear(); // for next iteration + } + + if (flag_with_preconditioner) + { + // Apply the preconditioner to the residuals + m_residuals = m_preconditioner * m_residuals; + } + + if (flag_with_constraints) + { + // Apply the constraints Y to residuals + applyConstraintsInPlace(m_residuals, m_Y, m_B); + } + + if (orthogonalizeInPlace(m_residuals, m_B, BR) != SUCCESSFUL) + { + break; + } + AR = A * m_residuals; + + // Orthonormalize conjugate directions + if (iter_num > 0) + { + if (orthogonalizeInPlace(directions, m_B, BD, true) != SUCCESSFUL) + { + break; + } + AD = A * directions; + } + + // Perform the Rayleigh Ritz Procedure + XAR = Matrix(X.transpose() * AR); + RAR = Matrix(m_residuals.transpose() * AR); + XBR = Matrix(X.transpose() * BR); + + if (iter_num > 0) + { + XAD = X.transpose() * AD; + RAD = m_residuals.transpose() * AD; + DAD = directions.transpose() * AD; + XBD = 
X.transpose() * BD; + RBD = m_residuals.transpose() * BD; + + gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose()); + gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + else + { + gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR); + gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + + //calculate the lowest/largest m eigenpairs; Solve the generalized eigenvalue problem. + DenseSymMatProd Aop(gramA); + DenseCholesky Bop(gramB); + + SymGEigsSolver, + DenseCholesky, GEIGS_CHOLESKY> + geigs(&Aop, &Bop, m_nev, std::min(10, int(gramA.rows()) - 1)); + + geigs.init(); + int nconv = geigs.compute(); + + //Mat evecs; + if (geigs.info() == SUCCESSFUL) + { + m_evalues = geigs.eigenvalues(); + m_evectors = geigs.eigenvectors(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + } + else + { + // Problem With General EgenVec + m_info = geigs.info(); + break; + } + + // Compute Ritz vectors + if (iter_num > 0) + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + sparse_eVecD = eVecD.sparseView(); + + DD = m_residuals * sparse_eVecR; // new conjugate directions + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + + DD = DD + directions * sparse_eVecD; + ADD = ADD + AD * sparse_eVecD; + BDD = BDD + BD * sparse_eVecD; + } + else + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + + DD = m_residuals * sparse_eVecR; + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + } + + X = X * sparse_eVecX + DD; + AX = AX * sparse_eVecX + ADD; + BX = BX * sparse_eVecX + BDD; + + directions = DD; + AD = ADD; + BD = BDD; + + } // iteration loop + + // calculate last residuals + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = SUCCESSFUL; + } + } // compute + + Vector eigenvalues() + { + return m_evalues; + } + + Matrix eigenvectors() + { + return m_evectors; + } + + Matrix residuals() + { + return Matrix(m_residuals); + } + + int info() { return m_info; } +}; + +} // namespace Spectra + +#endif // LOBPCG_SOLVER diff --git a/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h new file mode 100644 index 000000000..2fab26b97 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h @@ -0,0 +1,202 @@ +// Copyright (C) 2018 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
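// Illustrative usage sketch (comment only, not part of the original header),
// assuming a dense Eigen matrix A; PartialSVDSolver, defined below, obtains the
// leading singular triplets by running SymEigsSolver on A'A or AA', whichever
// is smaller:
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 100);
//   Spectra::PartialSVDSolver<double, Eigen::MatrixXd> svds(A, 5, 20);
//   svds.compute();
//   Eigen::VectorXd s = svds.singular_values();
//   Eigen::MatrixXd U = svds.matrix_U(5);
//   Eigen::MatrixXd V = svds.matrix_V(5);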
+ +#ifndef PARTIAL_SVD_SOLVER_H +#define PARTIAL_SVD_SOLVER_H + +#include +#include "../SymEigsSolver.h" + +namespace Spectra { + +// Abstract class for matrix operation +template +class SVDMatOp +{ +public: + virtual int rows() const = 0; + virtual int cols() const = 0; + + // y_out = A' * A * x_in or y_out = A * A' * x_in + virtual void perform_op(const Scalar* x_in, Scalar* y_out) = 0; + + virtual ~SVDMatOp() {} +}; + +// Operation of a tall matrix in SVD +// We compute the eigenvalues of A' * A +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDTallMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDTallMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.rows()) + {} + + // These are the rows and columns of A' * A + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A' * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.cols()); + m_cache.noalias() = m_mat * x; + y.noalias() = m_mat.transpose() * m_cache; + } +}; + +// Operation of a wide matrix in SVD +// We compute the eigenvalues of A * A' +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDWideMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDWideMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.cols()) + {} + + // These are the rows and columns of A * A' + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A * A' * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.rows()); + MapVec y(y_out, m_mat.rows()); + m_cache.noalias() = m_mat.transpose() * x; + y.noalias() = m_mat * m_cache; + } +}; + +// Partial SVD solver +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template > +class PartialSVDSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_m; + const int m_n; + SVDMatOp* m_op; + SymEigsSolver >* m_eigs; + int m_nconv; + Matrix m_evecs; + +public: + // Constructor + PartialSVDSolver(ConstGenericMatrix& mat, int ncomp, int ncv) : + m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0) + { + // Determine the matrix type, tall or wide + if (m_m > m_n) + { + m_op = new SVDTallMatOp(mat); + } + else + { + m_op = new SVDWideMatOp(mat); + } + + // Solver object + m_eigs = new SymEigsSolver >(m_op, ncomp, ncv); + } + + // Destructor + virtual ~PartialSVDSolver() + { + delete m_eigs; + delete m_op; + } + + // Computation + int compute(int maxit = 1000, Scalar tol = 1e-10) + { + m_eigs->init(); + m_nconv = m_eigs->compute(maxit, tol); + + return m_nconv; + } + + // The converged singular values + Vector singular_values() const + { + Vector svals = m_eigs->eigenvalues().cwiseSqrt(); + + return svals; + } + + // The converged left singular vectors + Matrix matrix_U(int nu) + { + if (m_evecs.cols() < 1) + { 
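            // Comment added for clarity: the eigenvectors of A'A (tall case) or
            // AA' (wide case) are computed lazily on first use and cached in m_evecs.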
+ m_evecs = m_eigs->eigenvectors(); + } + nu = std::min(nu, m_nconv); + if (m_m <= m_n) + { + return m_evecs.leftCols(nu); + } + + return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix(); + } + + // The converged right singular vectors + Matrix matrix_V(int nv) + { + if (m_evecs.cols() < 1) + { + m_evecs = m_eigs->eigenvectors(); + } + nv = std::min(nv, m_nconv); + if (m_m > m_n) + { + return m_evecs.leftCols(nv); + } + + return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix(); + } +}; + +} // namespace Spectra + +#endif // PARTIAL_SVD_SOLVER_H diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt index 16dca6736..8736a5954 100644 --- a/gtsam/CMakeLists.txt +++ b/gtsam/CMakeLists.txt @@ -14,7 +14,6 @@ set (gtsam_subdirs sam sfm slam - smart navigation ) @@ -137,6 +136,8 @@ target_include_directories(gtsam BEFORE PUBLIC # SuiteSparse_config $ $ + # Spectra + $ # CCOLAMD $ $ diff --git a/gtsam/base/DSFVector.h b/gtsam/base/DSFVector.h index fdbd96dc8..6e9bc5c6b 100644 --- a/gtsam/base/DSFVector.h +++ b/gtsam/base/DSFVector.h @@ -55,11 +55,6 @@ public: /// Merge the sets containing i1 and i2. Does nothing if i1 and i2 are already in the same set. void merge(const size_t& i1, const size_t& i2); - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - inline size_t findSet(size_t key) const {return find(key);} - inline void makeUnionInPlace(const size_t& i1, const size_t& i2) {return merge(i1,i2);} -#endif }; /** diff --git a/gtsam/base/GenericValue.h b/gtsam/base/GenericValue.h index 98425adde..0e928765f 100644 --- a/gtsam/base/GenericValue.h +++ b/gtsam/base/GenericValue.h @@ -30,6 +30,14 @@ #include #include // operator typeid +#ifdef _WIN32 +#define GENERICVALUE_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define GENERICVALUE_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** @@ -84,7 +92,7 @@ public: /// Virtual print function, uses traits void print(const std::string& str) const override { - std::cout << "(" << demangle(typeid(T).name()) << ") "; + std::cout << "(" << demangle(typeid(T).name()) << ")\n"; traits::Print(value_, str); } @@ -198,4 +206,12 @@ const ValueType& Value::cast() const { return dynamic_cast&>(*this).value(); } +/** Functional constructor of GenericValue so T can be automatically deduced + */ +template +GenericValue genericValue(const T& v) { + return GenericValue(v); +} + + } /* namespace gtsam */ diff --git a/gtsam/base/Manifold.h b/gtsam/base/Manifold.h index 9feb2b451..dbe497005 100644 --- a/gtsam/base/Manifold.h +++ b/gtsam/base/Manifold.h @@ -167,62 +167,6 @@ struct FixedDimension { BOOST_STATIC_ASSERT_MSG(value != Eigen::Dynamic, "FixedDimension instantiated for dymanically-sized type."); }; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// Helper class to construct the product manifold of two other manifolds, M1 and M2 -/// Deprecated because of limited usefulness, maximum obfuscation -template -class ProductManifold: public std::pair { - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsManifold)); - -protected: - enum { dimension1 = traits::dimension }; - enum { dimension2 = traits::dimension }; - -public: - enum { dimension = dimension1 + dimension2 }; - inline static size_t Dim() { return dimension;} - inline size_t dim() const { return dimension;} - - typedef Eigen::Matrix 
TangentVector; - typedef OptionalJacobian ChartJacobian; - - /// Default constructor needs default constructors to be defined - ProductManifold():std::pair(M1(),M2()) {} - - // Construct from two original manifold values - ProductManifold(const M1& m1, const M2& m2):std::pair(m1,m2) {} - - /// Retract delta to manifold - ProductManifold retract(const TangentVector& xi) const { - M1 m1 = traits::Retract(this->first, xi.template head()); - M2 m2 = traits::Retract(this->second, xi.template tail()); - return ProductManifold(m1,m2); - } - - /// Compute the coordinates in the tangent space - TangentVector localCoordinates(const ProductManifold& other) const { - typename traits::TangentVector v1 = traits::Local(this->first, other.first); - typename traits::TangentVector v2 = traits::Local(this->second, other.second); - TangentVector v; - v << v1, v2; - return v; - } - - // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html - enum { NeedsToAlign = (sizeof(M1) % 16) == 0 || (sizeof(M2) % 16) == 0 - }; -public: - GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) -}; - -// Define any direct product group to be a model of the multiplicative Group concept -template -struct traits > : internal::Manifold > { -}; -#endif - } // \ namespace gtsam ///** diff --git a/gtsam/base/Matrix.cpp b/gtsam/base/Matrix.cpp index e2d8f71d1..551bdac10 100644 --- a/gtsam/base/Matrix.cpp +++ b/gtsam/base/Matrix.cpp @@ -136,20 +136,24 @@ Vector operator^(const Matrix& A, const Vector & v) { return A.transpose() * v; } +const Eigen::IOFormat& matlabFormat() { + static const Eigen::IOFormat matlab( + Eigen::StreamPrecision, // precision + Eigen::DontAlignCols, // flags set such that rowSpacers are not added + ", ", // coeffSeparator + ";\n", // rowSeparator + "\t", // rowPrefix + "", // rowSuffix + "[\n", // matPrefix + "\n]" // matSuffix + ); + return matlab; +} + /* ************************************************************************* */ //3 argument call void print(const Matrix& A, const string &s, ostream& stream) { - static const Eigen::IOFormat matlab( - Eigen::StreamPrecision, // precision - 0, // flags - ", ", // coeffSeparator - ";\n", // rowSeparator - "\t", // rowPrefix - "", // rowSuffix - "[\n", // matPrefix - "\n]" // matSuffix - ); - cout << s << A.format(matlab) << endl; + cout << s << A.format(matlabFormat()) << endl; } /* ************************************************************************* */ diff --git a/gtsam/base/Matrix.h b/gtsam/base/Matrix.h index 1c1138438..071c33fca 100644 --- a/gtsam/base/Matrix.h +++ b/gtsam/base/Matrix.h @@ -23,14 +23,11 @@ // \callgraph #pragma once + #include #include #include -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -#include -#include -#include -#endif + #include #include #include @@ -76,6 +73,10 @@ GTSAM_MAKE_MATRIX_DEFS(9); typedef Eigen::Block SubMatrix; typedef Eigen::Block ConstSubMatrix; +// Matrix formatting arguments when printing. +// Akin to Matlab style. 
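// Illustrative usage (comment only, not part of the original header):
//   Matrix A = (Matrix(2, 2) << 1.0, 2.0, 3.0, 4.0).finished();
//   std::cout << A.format(matlabFormat()) << std::endl;
// which prints A in a bracketed, semicolon-per-row layout similar to MATLAB.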
+const Eigen::IOFormat& matlabFormat(); + /** * equals with a tolerance */ @@ -517,23 +518,6 @@ struct MultiplyWithInverseFunction { const Operator phi_; }; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -inline Matrix zeros( size_t m, size_t n ) { return Matrix::Zero(m,n); } -inline Matrix ones( size_t m, size_t n ) { return Matrix::Ones(m,n); } -inline Matrix eye( size_t m, size_t n) { return Matrix::Identity(m, n); } -inline Matrix eye( size_t m ) { return eye(m,m); } -inline Matrix diag(const Vector& v) { return v.asDiagonal(); } -inline void multiplyAdd(double alpha, const Matrix& A, const Vector& x, Vector& e) { e += alpha * A * x; } -inline void multiplyAdd(const Matrix& A, const Vector& x, Vector& e) { e += A * x; } -inline void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, Vector& x) { x += alpha * A.transpose() * e; } -inline void transposeMultiplyAdd(const Matrix& A, const Vector& e, Vector& x) { x += A.transpose() * e; } -inline void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, SubVector x) { x += alpha * A.transpose() * e; } -inline void insertColumn(Matrix& A, const Vector& col, size_t j) { A.col(j) = col; } -inline void insertColumn(Matrix& A, const Vector& col, size_t i, size_t j) { A.col(j).segment(i, col.size()) = col; } -inline void solve(Matrix& A, Matrix& B) { B = A.fullPivLu().solve(B); } -inline Matrix inverse(const Matrix& A) { return A.inverse(); } -#endif - GTSAM_EXPORT Matrix LLt(const Matrix& A); GTSAM_EXPORT Matrix RtR(const Matrix& A); diff --git a/gtsam/base/Vector.h b/gtsam/base/Vector.h index 81be36c0a..319ad6ee6 100644 --- a/gtsam/base/Vector.h +++ b/gtsam/base/Vector.h @@ -256,26 +256,6 @@ GTSAM_EXPORT Vector concatVectors(const std::list& vs); * concatenate Vectors */ GTSAM_EXPORT Vector concatVectors(size_t nrVectors, ...); - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -inline Vector abs(const Vector& v){return v.cwiseAbs();} -inline Vector basis(size_t n, size_t i) { return Vector::Unit(n,i); } -inline Vector delta(size_t n, size_t i, double value){ return Vector::Unit(n, i) * value;} -inline size_t dim(const Vector& v) { return v.size(); } -inline Vector ediv(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseQuotient(b);} -inline Vector esqrt(const Vector& v) { return v.cwiseSqrt();} -inline Vector emul(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseProduct(b);} -inline double max(const Vector &a){return a.maxCoeff();} -inline double norm_2(const Vector& v) {return v.norm();} -inline Vector ones(size_t n) { return Vector::Ones(n); } -inline Vector reciprocal(const Vector &a) {return a.array().inverse();} -inline Vector repeat(size_t n, double value) {return Vector::Constant(n, value);} -inline const Vector sub(const Vector &v, size_t i1, size_t i2) {return v.segment(i1,i2-i1);} -inline void subInsert(Vector& fullVector, const Vector& subVector, size_t i) {fullVector.segment(i, subVector.size()) = subVector;} -inline double sum(const Vector &a){return a.sum();} -inline bool zero(const Vector& v){ return v.isZero(); } -inline Vector zero(size_t n) { return Vector::Zero(n); } -#endif } // namespace gtsam #include diff --git a/gtsam/base/timing.cpp b/gtsam/base/timing.cpp index b33f06045..4b41ea937 100644 --- a/gtsam/base/timing.cpp +++ b/gtsam/base/timing.cpp @@ -89,7 +89,7 @@ void TimingOutline::print(const std::string& outline) const { childOrder[child.second->myOrder_] = child.second; } // Print children - for(const ChildOrder::value_type order_child: 
childOrder) { + for(const ChildOrder::value_type& order_child: childOrder) { std::string childOutline(outline); childOutline += "| "; order_child.second->print(childOutline); diff --git a/gtsam/config.h.in b/gtsam/config.h.in index b480996ec..9dc10c36a 100644 --- a/gtsam/config.h.in +++ b/gtsam/config.h.in @@ -70,7 +70,7 @@ #cmakedefine GTSAM_THROW_CHEIRALITY_EXCEPTION // Make sure dependent projects that want it can see deprecated functions -#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V4 +#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V41 // Publish flag about Eigen typedef #cmakedefine GTSAM_TYPEDEF_POINTS_TO_VECTORS diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp index f83349436..b7b9d7034 100644 --- a/gtsam/discrete/DecisionTreeFactor.cpp +++ b/gtsam/discrete/DecisionTreeFactor.cpp @@ -69,7 +69,7 @@ namespace gtsam { for(Key j: f.keys()) cs[j] = f.cardinality(j); // Convert map into keys DiscreteKeys keys; - for(const DiscreteKey& key: cs) + for(const std::pair& key: cs) keys.push_back(key); // apply operand ADT result = ADT::apply(f, op); diff --git a/gtsam/discrete/DiscreteBayesTree.h b/gtsam/discrete/DiscreteBayesTree.h index 3d6e016fd..29da5817e 100644 --- a/gtsam/discrete/DiscreteBayesTree.h +++ b/gtsam/discrete/DiscreteBayesTree.h @@ -45,6 +45,7 @@ class GTSAM_EXPORT DiscreteBayesTreeClique typedef boost::shared_ptr shared_ptr; typedef boost::weak_ptr weak_ptr; DiscreteBayesTreeClique() {} + virtual ~DiscreteBayesTreeClique() {} DiscreteBayesTreeClique( const boost::shared_ptr& conditional) : Base(conditional) {} diff --git a/gtsam/discrete/Potentials.cpp b/gtsam/discrete/Potentials.cpp index fe99ea975..331a76c13 100644 --- a/gtsam/discrete/Potentials.cpp +++ b/gtsam/discrete/Potentials.cpp @@ -56,7 +56,7 @@ bool Potentials::equals(const Potentials& other, double tol) const { /* ************************************************************************* */ void Potentials::print(const string& s, const KeyFormatter& formatter) const { cout << s << "\n Cardinalities: {"; - for (const DiscreteKey& key : cardinalities_) + for (const std::pair& key : cardinalities_) cout << formatter(key.first) << ":" << key.second << ", "; cout << "}" << endl; ADT::print(" "); diff --git a/gtsam/geometry/EssentialMatrix.h b/gtsam/geometry/EssentialMatrix.h index ca719eb37..909576aa0 100644 --- a/gtsam/geometry/EssentialMatrix.h +++ b/gtsam/geometry/EssentialMatrix.h @@ -176,17 +176,6 @@ class EssentialMatrix { /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point3 transform_to(const Point3& p, - OptionalJacobian<3, 5> DE = boost::none, - OptionalJacobian<3, 3> Dpoint = boost::none) const { - return transformTo(p, DE, Dpoint); - }; - /// @} -#endif - private: /// @name Advanced Interface /// @{ diff --git a/gtsam/geometry/Point2.cpp b/gtsam/geometry/Point2.cpp index 3d4bb753e..4cead869f 100644 --- a/gtsam/geometry/Point2.cpp +++ b/gtsam/geometry/Point2.cpp @@ -79,18 +79,6 @@ ostream &operator<<(ostream &os, const Point2& p) { return os; } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -boost::optional CircleCircleIntersection(double R_d, double r_d, double tol) { - return circleCircleIntersection(R_d, r_d, tol); -} -std::list CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional fh) { - return circleCircleIntersection(c1, c2, fh); -} -std::list CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol) { - return circleCircleIntersection(c1, r1, c2, r2, tol); -} -#endif - #endif // 
GTSAM_TYPEDEF_POINTS_TO_VECTORS /* ************************************************************************* */ diff --git a/gtsam/geometry/Point2.h b/gtsam/geometry/Point2.h index 718fb2992..e186f7b67 100644 --- a/gtsam/geometry/Point2.h +++ b/gtsam/geometry/Point2.h @@ -46,12 +46,7 @@ public: /// @{ /// default constructor -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - // Deprecated default constructor initializes to zero, in contrast to new behavior below - Point2() { setZero(); } -#else - Point2() {} -#endif + Point2() {} using Vector2::Vector2; @@ -113,25 +108,7 @@ public: /// Streaming GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point2& p); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point2 inverse() const { return -(*this); } - Point2 compose(const Point2& q) const { return (*this)+q;} - Point2 between(const Point2& q) const { return q-(*this);} - Vector2 localCoordinates(const Point2& q) const { return between(q);} - Point2 retract(const Vector2& v) const { return compose(Point2(v));} - static Vector2 Logmap(const Point2& p) { return p;} - static Point2 Expmap(const Vector2& v) { return Point2(v);} - inline double dist(const Point2& p2) const {return distance(p2);} - GTSAM_EXPORT static boost::optional CircleCircleIntersection(double R_d, double r_d, double tol = 1e-9); - GTSAM_EXPORT static std::list CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional fh); - GTSAM_EXPORT static std::list CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol = 1e-9); - /// @} -#endif - -private: - + private: /// @name Advanced Interface /// @{ diff --git a/gtsam/geometry/Point3.cpp b/gtsam/geometry/Point3.cpp index 8aa339a89..25fb9b92d 100644 --- a/gtsam/geometry/Point3.cpp +++ b/gtsam/geometry/Point3.cpp @@ -62,23 +62,6 @@ ostream &operator<<(ostream &os, const Point3& p) { return os; } -/* ************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -Point3 Point3::add(const Point3 &q, OptionalJacobian<3,3> H1, - OptionalJacobian<3,3> H2) const { - if (H1) *H1 = I_3x3; - if (H2) *H2 = I_3x3; - return *this + q; -} - -Point3 Point3::sub(const Point3 &q, OptionalJacobian<3,3> H1, - OptionalJacobian<3,3> H2) const { - if (H1) *H1 = I_3x3; - if (H2) *H2 = -I_3x3; - return *this - q; -} -#endif - #endif /* ************************************************************************* */ double distance3(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1, diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h index 3b2330403..1ab5c313e 100644 --- a/gtsam/geometry/Point3.h +++ b/gtsam/geometry/Point3.h @@ -51,11 +51,6 @@ class Point3 : public Vector3 { /// @name Standard Constructors /// @{ - // Deprecated default constructor initializes to zero, in contrast to new behavior below -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - Point3() { setZero(); } -#endif - using Vector3::Vector3; /// @} @@ -118,27 +113,7 @@ class Point3 : public Vector3 { /// Output stream operator GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point3& p); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point3 inverse() const { return -(*this);} - Point3 compose(const Point3& q) const { return (*this)+q;} - Point3 between(const Point3& q) const { return q-(*this);} - Vector3 localCoordinates(const Point3& q) const { return between(q);} - Point3 retract(const Vector3& v) const { return compose(Point3(v));} - static Vector3 Logmap(const 
Point3& p) { return p;} - static Point3 Expmap(const Vector3& v) { return Point3(v);} - inline double dist(const Point3& q) const { return (q - *this).norm(); } - Point3 normalize(OptionalJacobian<3, 3> H = boost::none) const { return normalized(H);} - GTSAM_EXPORT Point3 add(const Point3& q, OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) const; - GTSAM_EXPORT Point3 sub(const Point3& q, OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) const; - /// @} -#endif - - private: - + private: /** Serialization function */ friend class boost::serialization::access; template diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 9c41a76c8..71df0f753 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -322,10 +322,10 @@ boost::optional align(const vector& pairs) { // calculate cos and sin double c=0,s=0; for(const Point2Pair& pair: pairs) { - Point2 dq = pair.first - cp; - Point2 dp = pair.second - cq; - c += dp.x() * dq.x() + dp.y() * dq.y(); - s += dp.y() * dq.x() - dp.x() * dq.y(); // this works but is negative from formula above !! :-( + Point2 dp = pair.first - cp; + Point2 dq = pair.second - cq; + c += dp.x() * dq.x() + dp.y() * dq.y(); + s += -dp.y() * dq.x() + dp.x() * dq.y(); } // calculate angle and translation diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index 2a1f108ca..6372779c3 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -289,22 +289,6 @@ public: /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point2 transform_from(const Point2& point, - OptionalJacobian<2, 3> Dpose = boost::none, - OptionalJacobian<2, 2> Dpoint = boost::none) const { - return transformFrom(point, Dpose, Dpoint); - } - Point2 transform_to(const Point2& point, - OptionalJacobian<2, 3> Dpose = boost::none, - OptionalJacobian<2, 2> Dpoint = boost::none) const { - return transformTo(point, Dpose, Dpoint); - } - /// @} -#endif - private: // Serialization function diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index 1b9285100..ad8f7f484 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -106,9 +106,7 @@ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y, /* ************************************************************************* */ void Pose3::print(const string& s) const { - cout << s; - R_.print("R:\n"); - cout << t_ << ";" << endl; + cout << (s.empty() ? 
s : s + " ") << *this << endl; } /* ************************************************************************* */ @@ -292,15 +290,6 @@ Pose3 Pose3::transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself, return wTa.compose(aTb, Hself, HaTb); } -/* ************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -Pose3 Pose3::transform_to(const Pose3& pose) const { - Rot3 cRv = R_ * Rot3(pose.R_.inverse()); - Point3 t = pose.transform_to(t_); - return Pose3(cRv, t); -} -#endif - /* ************************************************************************* */ Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself, OptionalJacobian<6, 6> HwTb) const { @@ -439,9 +428,9 @@ boost::optional align(const vector& baPointPairs) { /* ************************************************************************* */ std::ostream &operator<<(std::ostream &os, const Pose3& pose) { - os << pose.rotation() << "\n"; - const Point3& t = pose.translation(); - os << '[' << t.x() << ", " << t.y() << ", " << t.z() << "]\';\n"; + // Both Rot3 and Point3 have ostream definitions so we use them. + os << "R: " << pose.rotation() << "\n"; + os << "t: " << pose.translation(); return os; } diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index 3825b6241..159fd2927 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -326,30 +326,6 @@ public: GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Pose3& p); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point3 transform_from(const Point3& point, - OptionalJacobian<3, 6> Hself = boost::none, - OptionalJacobian<3, 3> Hpoint = boost::none) const { - return transformFrom(point, Hself, Hpoint); - } - Point3 transform_to(const Point3& point, - OptionalJacobian<3, 6> Hself = boost::none, - OptionalJacobian<3, 3> Hpoint = boost::none) const { - return transformTo(point, Hself, Hpoint); - } - Pose3 transform_pose_to(const Pose3& pose, - OptionalJacobian<6, 6> Hself = boost::none, - OptionalJacobian<6, 6> Hpose = boost::none) const { - return transformPoseTo(pose, Hself, Hpose); - } - /** - * @deprecated: this function is neither here not there. 
*/ - Pose3 transform_to(const Pose3& pose) const; - /// @} -#endif - private: /** Serialization function */ friend class boost::serialization::access; @@ -380,11 +356,6 @@ inline Matrix wedge(const Vector& xi) { return Pose3::wedge(xi(0), xi(1), xi(2), xi(3), xi(4), xi(5)); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -// deprecated: use Pose3::Align with point pairs ordered the opposite way -GTSAM_EXPORT boost::optional align(const std::vector& baPointPairs); -#endif - // For MATLAB wrapper typedef std::vector Pose3Vector; diff --git a/gtsam/geometry/Rot2.cpp b/gtsam/geometry/Rot2.cpp index 04ed16774..7502c4ccf 100644 --- a/gtsam/geometry/Rot2.cpp +++ b/gtsam/geometry/Rot2.cpp @@ -39,6 +39,13 @@ Rot2 Rot2::atan2(double y, double x) { return R.normalize(); } +/* ************************************************************************* */ +Rot2 Rot2::Random(std::mt19937& rng) { + uniform_real_distribution randomAngle(-M_PI, M_PI); + double angle = randomAngle(rng); + return fromAngle(angle); +} + /* ************************************************************************* */ void Rot2::print(const string& s) const { cout << s << ": " << theta() << endl; diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index f46f12540..8a361f558 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -22,6 +22,8 @@ #include #include +#include + namespace gtsam { /** @@ -79,6 +81,14 @@ namespace gtsam { /** Named constructor that behaves as atan2, i.e., y,x order (!) and normalizes */ static Rot2 atan2(double y, double x); + /** + * Random, generates random angle \in [-p,pi] + * Example: + * std::mt19937 engine(42); + * Unit3 unit = Unit3::Random(engine); + */ + static Rot2 Random(std::mt19937 & rng); + /// @} /// @name Testable /// @{ diff --git a/gtsam/geometry/Rot3.cpp b/gtsam/geometry/Rot3.cpp index 01f62b8cb..aaf23e685 100644 --- a/gtsam/geometry/Rot3.cpp +++ b/gtsam/geometry/Rot3.cpp @@ -32,7 +32,8 @@ namespace gtsam { /* ************************************************************************* */ void Rot3::print(const std::string& s) const { - gtsam::print((Matrix)matrix(), s); + cout << (s.empty() ? 
"R: " : s + " "); + gtsam::print(static_cast(matrix())); } /* ************************************************************************* */ @@ -222,10 +223,7 @@ pair RQ(const Matrix3& A) { /* ************************************************************************* */ ostream &operator<<(ostream &os, const Rot3& R) { - os << "\n"; - os << '|' << R.r1().x() << ", " << R.r2().x() << ", " << R.r3().x() << "|\n"; - os << '|' << R.r1().y() << ", " << R.r2().y() << ", " << R.r3().y() << "|\n"; - os << '|' << R.r1().z() << ", " << R.r2().z() << ", " << R.r3().z() << "|\n"; + os << R.matrix().format(matlabFormat()); return os; } diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index 8f24f07c8..33d120a85 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -254,7 +254,7 @@ namespace gtsam { /// @{ /** print */ - void print(const std::string& s="R") const; + void print(const std::string& s="") const; /** equals with an tolerance */ bool equals(const Rot3& p, double tol = 1e-9) const; @@ -500,23 +500,6 @@ namespace gtsam { /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - static Rot3 rodriguez(const Point3& axis, double angle) { return AxisAngle(axis, angle); } - static Rot3 rodriguez(const Unit3& axis, double angle) { return AxisAngle(axis, angle); } - static Rot3 rodriguez(const Vector3& w) { return Rodrigues(w); } - static Rot3 rodriguez(double wx, double wy, double wz) { return Rodrigues(wx, wy, wz); } - static Rot3 yaw (double t) { return Yaw(t); } - static Rot3 pitch(double t) { return Pitch(t); } - static Rot3 roll (double t) { return Roll(t); } - static Rot3 ypr(double y, double p, double r) { return Ypr(r,p,y);} - static Rot3 quaternion(double w, double x, double y, double z) { - return Rot3::Quaternion(w, x, y, z); - } - /// @} -#endif - private: /** Serialization function */ friend class boost::serialization::access; diff --git a/gtsam/geometry/SOn-inl.h b/gtsam/geometry/SOn-inl.h index 0d7f3e108..6180f4cc7 100644 --- a/gtsam/geometry/SOn-inl.h +++ b/gtsam/geometry/SOn-inl.h @@ -26,13 +26,13 @@ using namespace std; namespace gtsam { -// Implementation for N>5 just uses dynamic version +// Implementation for N>=5 just uses dynamic version template typename SO::MatrixNN SO::Hat(const TangentVector& xi) { return SOn::Hat(xi); } -// Implementation for N>5 just uses dynamic version +// Implementation for N>=5 just uses dynamic version template typename SO::TangentVector SO::Vee(const MatrixNN& X) { return SOn::Vee(X); @@ -60,8 +60,9 @@ typename SO::TangentVector SO::ChartAtOrigin::Local(const SO& R, template typename SO::MatrixDD SO::AdjointMap() const { + if (N==2) return I_1x1; // SO(2) case throw std::runtime_error( - "SO::AdjointMap only implemented for SO3 and SO4."); + "SO::AdjointMap only implemented for SO2, SO3 and SO4."); } template @@ -84,30 +85,22 @@ typename SO::MatrixDD SO::LogmapDerivative(const TangentVector& omega) { throw std::runtime_error("O::LogmapDerivative only implemented for SO3."); } +// Default fixed size version (but specialized elsewehere for N=2,3,4) template typename SO::VectorN2 SO::vec( OptionalJacobian H) const { - const size_t n = rows(); - const size_t n2 = n * n; - // Vectorize - VectorN2 X(n2); - X << Eigen::Map(matrix_.data(), n2, 1); + VectorN2 X = Eigen::Map(matrix_.data()); // If requested, calculate H as (I \oplus Q) * P, // where Q is the N*N rotation matrix, and P is calculated below. 
if (H) { // Calculate P matrix of vectorized generators // TODO(duy): Should we refactor this as the jacobian of Hat? - const size_t d = dim(); - Matrix P(n2, d); - for (size_t j = 0; j < d; j++) { - const auto X = Hat(Eigen::VectorXd::Unit(d, j)); - P.col(j) = Eigen::Map(X.data(), n2, 1); - } - H->resize(n2, d); - for (size_t i = 0; i < n; i++) { - H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + Matrix P = SO::VectorizedGenerators(); + for (size_t i = 0; i < N; i++) { + H->block(i * N, 0, N, dimension) = + matrix_ * P.block(i * N, 0, N, dimension); } } return X; diff --git a/gtsam/geometry/SOn.cpp b/gtsam/geometry/SOn.cpp index 37b6c1784..c6cff4214 100644 --- a/gtsam/geometry/SOn.cpp +++ b/gtsam/geometry/SOn.cpp @@ -22,21 +22,18 @@ namespace gtsam { template <> -GTSAM_EXPORT -Matrix SOn::Hat(const Vector& xi) { +GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref X) { size_t n = AmbientDim(xi.size()); - if (n < 2) throw std::invalid_argument("SO::Hat: n<2 not supported"); - - Matrix X(n, n); // allocate space for n*n skew-symmetric matrix - X.setZero(); - if (n == 2) { + if (n < 2) + throw std::invalid_argument("SO::Hat: n<2 not supported"); + else if (n == 2) { // Handle SO(2) case as recursion bottom assert(xi.size() == 1); X << 0, -xi(0), xi(0), 0; } else { // Recursively call SO(n-1) call for top-left block const size_t dmin = (n - 1) * (n - 2) / 2; - X.topLeftCorner(n - 1, n - 1) = Hat(xi.tail(dmin)); + Hat(xi.tail(dmin), X.topLeftCorner(n - 1, n - 1)); // determine sign of last element (signs alternate) double sign = pow(-1.0, xi.size()); @@ -47,7 +44,14 @@ Matrix SOn::Hat(const Vector& xi) { X(j, n - 1) = -X(n - 1, j); sign = -sign; } + X(n - 1, n - 1) = 0; // bottom-right } +} + +template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) { + size_t n = AmbientDim(xi.size()); + Matrix X(n, n); // allocate space for n*n skew-symmetric matrix + SOn::Hat(xi, X); return X; } @@ -99,4 +103,27 @@ SOn LieGroup::between(const SOn& g, DynamicJacobian H1, return result; } +// Dynamic version of vec +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const { + const size_t n = rows(), n2 = n * n; + + // Vectorize + VectorN2 X(n2); + X << Eigen::Map(matrix_.data(), n2, 1); + + // If requested, calculate H as (I \oplus Q) * P, + // where Q is the N*N rotation matrix, and P is calculated below. + if (H) { + // Calculate P matrix of vectorized generators + // TODO(duy): Should we refactor this as the jacobian of Hat? + Matrix P = SOn::VectorizedGenerators(n); + const size_t d = dim(); + H->resize(n2, d); + for (size_t i = 0; i < n; i++) { + H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + } + } + return X; +} + } // namespace gtsam diff --git a/gtsam/geometry/SOn.h b/gtsam/geometry/SOn.h index a6392c2f9..86b6019e1 100644 --- a/gtsam/geometry/SOn.h +++ b/gtsam/geometry/SOn.h @@ -98,8 +98,8 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static SO Lift(size_t n, const Eigen::MatrixBase &R) { Matrix Q = Matrix::Identity(n, n); - size_t p = R.rows(); - assert(p < n && R.cols() == p); + const int p = R.rows(); + assert(p >= 0 && p <= static_cast(n) && R.cols() == p); Q.topLeftCorner(p, p) = R; return SO(Q); } @@ -208,7 +208,7 @@ class SO : public LieGroup, internal::DimensionSO(N)> { // Calculate run-time dimensionality of manifold. // Available as dimension or Dim() for fixed N. 
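// Editor's note (illustrative, not part of this patch): the manifold dimension follows dim(SO(n)) = n(n-1)/2, queried at run time for the dynamic SOn and at compile time for fixed-size SO<N>. Assumes namespace gtsam and <cassert>.
SOn Q(5);                                    // dynamic-size SO(5)
assert(Q.dim() == 10);                       // 5*4/2
static_assert(SO<3>::dimension == 3, "so(3) has three generators");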
- size_t dim() const { return Dimension(matrix_.rows()); } + size_t dim() const { return Dimension(static_cast(matrix_.rows())); } /** * Hat operator creates Lie algebra element corresponding to d-vector, where d @@ -227,9 +227,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { */ static MatrixNN Hat(const TangentVector& xi); - /** - * Inverse of Hat. See note about xi element order in Hat. - */ + /// In-place version of Hat (see details there), implements recursion. + static void Hat(const Vector &xi, Eigen::Ref X); + + /// Inverse of Hat. See note about xi element order in Hat. static TangentVector Vee(const MatrixNN& X); // Chart at origin @@ -290,7 +291,34 @@ class SO : public LieGroup, internal::DimensionSO(N)> { * */ VectorN2 vec(OptionalJacobian H = boost::none) const; - /// @} + + /// Calculate N^2 x dim matrix of vectorized Lie algebra generators for SO(N) + template > + static Matrix VectorizedGenerators() { + constexpr size_t N2 = static_cast(N * N); + Eigen::Matrix G; + for (size_t j = 0; j < dimension; j++) { + const auto X = Hat(Vector::Unit(dimension, j)); + G.col(j) = Eigen::Map(X.data()); + } + return G; + } + + /// Calculate n^2 x dim matrix of vectorized Lie algebra generators for SO(n) + template > + static Matrix VectorizedGenerators(size_t n = 0) { + const size_t n2 = n * n, dim = Dimension(n); + Matrix G(n2, dim); + for (size_t j = 0; j < dim; j++) { + const auto X = Hat(Vector::Unit(dim, j)); + G.col(j) = Eigen::Map(X.data(), n2, 1); + } + return G; + } + + /// @{ + /// @name Serialization + /// @{ template friend void save(Archive&, SO&, const unsigned int); @@ -300,6 +328,8 @@ class SO : public LieGroup, internal::DimensionSO(N)> { friend void serialize(Archive&, SO&, const unsigned int); friend class boost::serialization::access; friend class Rot3; // for serialize + + /// @} }; using SOn = SO; @@ -333,6 +363,11 @@ template <> SOn LieGroup::between(const SOn& g, DynamicJacobian H1, DynamicJacobian H2) const; +/* + * Specialize dynamic vec. 
+ */ +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const; + /** Serialization function */ template void serialize( diff --git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index a169c833c..596fa8957 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -862,7 +862,15 @@ TEST( Pose3, stream) Pose3 T; std::ostringstream os; os << T; - EXPECT(os.str() == "\n|1, 0, 0|\n|0, 1, 0|\n|0, 0, 1|\n\n[0, 0, 0]';\n"); + + string expected; +#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS + expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0\n0\n0";; +#else + expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: [0, 0, 0]'"; +#endif + + EXPECT(os.str() == expected); } //****************************************************************************** @@ -1032,19 +1040,22 @@ TEST(Pose3, print) { std::stringstream expected; Point3 translation(1, 2, 3); + // Add expected rotation + expected << "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\n"; + #ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - expected << "1\n" + expected << "t: 1\n" "2\n" - "3;\n"; + "3\n"; #else - expected << '[' << translation.x() << ", " << translation.y() << ", " << translation.z() << "]\';"; + expected << "t: [" << translation.x() << ", " << translation.y() << ", " << translation.z() << "]'\n"; #endif // reset cout to the original stream std::cout.rdbuf(oldbuf); // Get substring corresponding to translation part - std::string actual = redirectStream.str().substr(38, 11); + std::string actual = redirectStream.str(); CHECK_EQUAL(expected.str(), actual); } diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp index 598c57b24..d5400494e 100644 --- a/gtsam/geometry/tests/testRot3.cpp +++ b/gtsam/geometry/tests/testRot3.cpp @@ -608,7 +608,8 @@ TEST( Rot3, stream) Rot3 R; std::ostringstream os; os << R; - EXPECT(os.str() == "\n|1, 0, 0|\n|0, 1, 0|\n|0, 0, 1|\n"); + string expected = "[\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]"; + EXPECT(os.str() == expected); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testSOn.cpp b/gtsam/geometry/tests/testSOn.cpp index 1cf8caed2..4d0ed98b3 100644 --- a/gtsam/geometry/tests/testSOn.cpp +++ b/gtsam/geometry/tests/testSOn.cpp @@ -39,8 +39,8 @@ using namespace std; using namespace gtsam; //****************************************************************************** -// Test dhynamic with n=0 -TEST(SOn, SO0) { +// Test dynamic with n=0 +TEST(SOn, SOn0) { const auto R = SOn(0); EXPECT_LONGS_EQUAL(0, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -50,7 +50,8 @@ TEST(SOn, SO0) { } //****************************************************************************** -TEST(SOn, SO5) { +// Test dynamic with n=5 +TEST(SOn, SOn5) { const auto R = SOn(5); EXPECT_LONGS_EQUAL(5, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -59,6 +60,28 @@ TEST(SOn, SO5) { EXPECT_LONGS_EQUAL(10, traits::GetDimension(R)); } +//****************************************************************************** +// Test fixed with n=2 +TEST(SOn, SO0) { + const auto R = SO<2>(); + EXPECT_LONGS_EQUAL(2, R.rows()); + EXPECT_LONGS_EQUAL(1, SO<2>::dimension); + EXPECT_LONGS_EQUAL(1, SO<2>::Dim()); + EXPECT_LONGS_EQUAL(1, R.dim()); + EXPECT_LONGS_EQUAL(1, traits>::GetDimension(R)); +} + +//****************************************************************************** +// Test fixed with n=5 +TEST(SOn, SO5) { + const auto R = SO<5>(); + 
EXPECT_LONGS_EQUAL(5, R.rows()); + EXPECT_LONGS_EQUAL(10, SO<5>::dimension); + EXPECT_LONGS_EQUAL(10, SO<5>::Dim()); + EXPECT_LONGS_EQUAL(10, R.dim()); + EXPECT_LONGS_EQUAL(10, traits>::GetDimension(R)); +} + //****************************************************************************** TEST(SOn, Concept) { BOOST_CONCEPT_ASSERT((IsGroup)); @@ -105,29 +128,29 @@ TEST(SOn, HatVee) { EXPECT(assert_equal((Vector)v.head<1>(), SOn::Vee(actual2))); Matrix expected3(3, 3); - expected3 << 0, -3, 2, // - 3, 0, -1, // - -2, 1, 0; + expected3 << 0, -3, 2, // + 3, 0, -1, // + -2, 1, 0; const auto actual3 = SOn::Hat(v.head<3>()); EXPECT(assert_equal(expected3, actual3)); EXPECT(assert_equal(skewSymmetric(1, 2, 3), actual3)); EXPECT(assert_equal((Vector)v.head<3>(), SOn::Vee(actual3))); Matrix expected4(4, 4); - expected4 << 0, -6, 5, 3, // - 6, 0, -4, -2, // - -5, 4, 0, 1, // - -3, 2, -1, 0; + expected4 << 0, -6, 5, 3, // + 6, 0, -4, -2, // + -5, 4, 0, 1, // + -3, 2, -1, 0; const auto actual4 = SOn::Hat(v.head<6>()); EXPECT(assert_equal(expected4, actual4)); EXPECT(assert_equal((Vector)v.head<6>(), SOn::Vee(actual4))); Matrix expected5(5, 5); - expected5 << 0,-10, 9, 7, -4, // - 10, 0, -8, -6, 3, // - -9, 8, 0, 5, -2, // - -7, 6, -5, 0, 1, // - 4, -3, 2, -1, 0; + expected5 << 0, -10, 9, 7, -4, // + 10, 0, -8, -6, 3, // + -9, 8, 0, 5, -2, // + -7, 6, -5, 0, 1, // + 4, -3, 2, -1, 0; const auto actual5 = SOn::Hat(v); EXPECT(assert_equal(expected5, actual5)); EXPECT(assert_equal((Vector)v, SOn::Vee(actual5))); @@ -159,6 +182,22 @@ TEST(SOn, RetractLocal) { CHECK(assert_equal(v1, SOn::ChartAtOrigin::Local(Q1), 1e-7)); } +//****************************************************************************** + +Matrix RetractJacobian(size_t n) { return SOn::VectorizedGenerators(n); } + +/// Test Jacobian of Retract at origin +TEST(SOn, RetractJacobian) { + Matrix actualH = RetractJacobian(3); + boost::function h = [](const Vector &v) { + return SOn::ChartAtOrigin::Retract(v).matrix(); + }; + Vector3 v; + v.setZero(); + const Matrix expectedH = numericalDerivative11(h, v, 1e-5); + CHECK(assert_equal(expectedH, actualH)); +} + //****************************************************************************** TEST(SOn, vec) { Vector10 v; @@ -166,11 +205,28 @@ TEST(SOn, vec) { SOn Q = SOn::ChartAtOrigin::Retract(v); Matrix actualH; const Vector actual = Q.vec(actualH); - boost::function h = [](const SOn& Q) { return Q.vec(); }; + boost::function h = [](const SOn &Q) { return Q.vec(); }; const Matrix H = numericalDerivative11(h, Q, 1e-5); CHECK(assert_equal(H, actualH)); } +//****************************************************************************** +TEST(SOn, VectorizedGenerators) { + // Default fixed + auto actual2 = SO<2>::VectorizedGenerators(); + CHECK(actual2.rows()==4 && actual2.cols()==1) + + // Specialized + auto actual3 = SO<3>::VectorizedGenerators(); + CHECK(actual3.rows()==9 && actual3.cols()==3) + auto actual4 = SO<4>::VectorizedGenerators(); + CHECK(actual4.rows()==16 && actual4.cols()==6) + + // Dynamic + auto actual5 = SOn::VectorizedGenerators(5); + CHECK(actual5.rows()==25 && actual5.cols()==10) +} + //****************************************************************************** int main() { TestResult tr; diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h index 8cdf0fdc0..6f6c645b8 100644 --- a/gtsam/geometry/triangulation.h +++ b/gtsam/geometry/triangulation.h @@ -123,27 +123,6 @@ std::pair triangulationGraph( return std::make_pair(graph, values); } -#ifdef 
GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// DEPRECATED: PinholeCamera specific version -template -Point3 triangulateNonlinear( - const CameraSet >& cameras, - const Point2Vector& measurements, const Point3& initialEstimate) { - return triangulateNonlinear > // - (cameras, measurements, initialEstimate); -} - -/// DEPRECATED: PinholeCamera specific version -template -std::pair triangulationGraph( - const CameraSet >& cameras, - const Point2Vector& measurements, Key landmarkKey, - const Point3& initialEstimate) { - return triangulationGraph > // - (cameras, measurements, landmarkKey, initialEstimate); -} -#endif - /** * Optimize for triangulation * @param graph nonlinear factors for projection diff --git a/gtsam/inference/BayesTree.h b/gtsam/inference/BayesTree.h index 10fc513ee..7199da0ad 100644 --- a/gtsam/inference/BayesTree.h +++ b/gtsam/inference/BayesTree.h @@ -249,25 +249,6 @@ namespace gtsam { // Friend JunctionTree because it directly fills roots and nodes index. template friend class EliminatableClusterTree; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - public: - /// @name Deprecated - /// @{ - void removePath(sharedClique clique, BayesNetType& bn, Cliques& orphans) { - removePath(clique, &bn, &orphans); - } - void removeTop(const KeyVector& keys, BayesNetType& bn, Cliques& orphans) { - removeTop(keys, &bn, &orphans); - } - void getCliqueData(BayesTreeCliqueData& stats, sharedClique clique) const { - getCliqueData(clique, &stats); - } - void addFactorsToGraph(FactorGraph& graph) const{ - addFactorsToGraph(& graph); - } - /// @} -#endif - private: /** Serialization function */ friend class boost::serialization::access; diff --git a/gtsam/inference/BayesTreeCliqueBase-inst.h b/gtsam/inference/BayesTreeCliqueBase-inst.h index a02fe274e..dfcc7c318 100644 --- a/gtsam/inference/BayesTreeCliqueBase-inst.h +++ b/gtsam/inference/BayesTreeCliqueBase-inst.h @@ -91,6 +91,7 @@ namespace gtsam { template size_t BayesTreeCliqueBase::numCachedSeparatorMarginals() const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (!cachedSeparatorMarginal_) return 0; @@ -144,6 +145,7 @@ namespace gtsam { typename BayesTreeCliqueBase::FactorGraphType BayesTreeCliqueBase::separatorMarginal( Eliminate function) const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); gttic(BayesTreeCliqueBase_separatorMarginal); // Check if the Separator marginal was already calculated if (!cachedSeparatorMarginal_) { @@ -206,6 +208,8 @@ namespace gtsam { // When a shortcut is requested, all of the shortcuts between it and the // root are also generated. So, if this clique's cached shortcut is set, // recursively call over all child cliques. Otherwise, it is unnecessary. 
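// Editor's illustrative sketch (not part of this patch): the lock_guards introduced in this file make concurrent const accesses to a shared clique safe; previously the mutable cache could race. `clique` and `elimFn` are hypothetical stand-ins for a shared clique pointer and an elimination function. Assumes <thread>.
auto work = [&clique, &elimFn] { clique->separatorMarginal(elimFn); };
std::thread t1(work), t2(work);
t1.join();
t2.join();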
+ + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (cachedSeparatorMarginal_) { for(derived_ptr& child: children) { child->deleteCachedShortcuts(); diff --git a/gtsam/inference/BayesTreeCliqueBase.h b/gtsam/inference/BayesTreeCliqueBase.h index 7aca84635..7b79ccf68 100644 --- a/gtsam/inference/BayesTreeCliqueBase.h +++ b/gtsam/inference/BayesTreeCliqueBase.h @@ -24,6 +24,7 @@ #include #include +#include namespace gtsam { @@ -75,10 +76,28 @@ namespace gtsam { /** Construct from a conditional, leaving parent and child pointers uninitialized */ BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {} + /** Shallow copy constructor */ + BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {} + + /** Shallow copy assignment operator */ + BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) { + conditional_ = c.conditional_; + parent_ = c.parent_; + children = c.children; + problemSize_ = c.problemSize_; + is_root = c.is_root; + return *this; + } + /// @} - /// This stores the Cached separator margnal P(S) + /// This stores the Cached separator marginal P(S) mutable boost::optional cachedSeparatorMarginal_; + /// This protects the cached separator marginal P(S) from concurrent reads/writes, + /// as many of the functions which access it are const (hence the mutable), + /// giving the false impression that these const functions are thread-safe, + /// which is not true due to these mutable values. This mutex fixes that. + mutable std::mutex cachedSeparatorMarginalMutex_; public: sharedConditional conditional_; @@ -144,7 +163,9 @@ namespace gtsam { void deleteCachedShortcuts(); const boost::optional& cachedSeparatorMarginal() const { - return cachedSeparatorMarginal_; } + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + return cachedSeparatorMarginal_; + } friend class BayesTree; @@ -159,7 +180,10 @@ namespace gtsam { KeyVector shortcut_indices(const derived_ptr& B, const FactorGraphType& p_Cp_B) const; /** Non-recursive delete cached shortcuts and marginals - internal only.
*/ - void deleteCachedShortcutsNonRecursive() { cachedSeparatorMarginal_ = boost::none; } + void deleteCachedShortcutsNonRecursive() { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + cachedSeparatorMarginal_ = boost::none; + } private: diff --git a/gtsam/inference/ISAM.h b/gtsam/inference/ISAM.h index fe6763a13..b4a5db740 100644 --- a/gtsam/inference/ISAM.h +++ b/gtsam/inference/ISAM.h @@ -72,17 +72,6 @@ class ISAM : public BAYESTREE { const Eliminate& function = EliminationTraitsType::DefaultEliminate); /// @} - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - void update_internal( - const FactorGraphType& newFactors, Cliques& orphans, - const Eliminate& function = EliminationTraitsType::DefaultEliminate) { - updateInternal(newFactors, &orphans, function); - } - /// @} -#endif }; } // namespace gtsam diff --git a/gtsam/linear/GaussianBayesTree.h b/gtsam/linear/GaussianBayesTree.h index b6f5acd52..e2d8ae87a 100644 --- a/gtsam/linear/GaussianBayesTree.h +++ b/gtsam/linear/GaussianBayesTree.h @@ -41,6 +41,7 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; typedef boost::weak_ptr weak_ptr; GaussianBayesTreeClique() {} + virtual ~GaussianBayesTreeClique() {} GaussianBayesTreeClique(const boost::shared_ptr& conditional) : Base(conditional) {} }; diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index fdbe511f8..0ea597f99 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -128,17 +128,9 @@ namespace gtsam { /** Scale the values in \c gy according to the sigmas for the frontal variables in this * conditional. */ void scaleFrontalsBySigma(VectorValues& gy) const; -// __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const; // FIXME: depreciated flag doesn't appear to exist? -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - constABlock get_R() const { return R(); } - constABlock get_S() const { return S(); } - constABlock get_S(const_iterator it) const { return S(it); } - const constBVector get_d() const { return d(); } - /// @} -#endif + // FIXME: deprecated flag doesn't appear to exist? 
+ // __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const; private: /** Serialization function */ diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h index 64b764087..a4de46104 100644 --- a/gtsam/linear/HessianFactor.h +++ b/gtsam/linear/HessianFactor.h @@ -354,16 +354,7 @@ namespace gtsam { /// Solve the system A'*A delta = A'*b in-place, return delta as VectorValues VectorValues solve(); - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - const SymmetricBlockMatrix& matrixObject() const { return info_; } - /// @} -#endif - - private: - + private: /// Allocate for given scatter pattern void Allocate(const Scatter& scatter); diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index b8a08be9e..2e0ffb034 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -421,21 +421,11 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph, /* ************************************************************************* */ void JacobianFactor::print(const string& s, const KeyFormatter& formatter) const { - static const Eigen::IOFormat matlab( - Eigen::StreamPrecision, // precision - 0, // flags - " ", // coeffSeparator - ";\n", // rowSeparator - "\t", // rowPrefix - "", // rowSuffix - "[\n", // matPrefix - "\n ]" // matSuffix - ); if (!s.empty()) cout << s << "\n"; for (const_iterator key = begin(); key != end(); ++key) { cout << boost::format(" A[%1%] = ") % formatter(*key); - cout << getA(key).format(matlab) << endl; + cout << getA(key).format(matlabFormat()) << endl; } cout << formatMatrixIndented(" b = ", getb(), true) << "\n"; if (model_) diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h index a126b37db..771992e80 100644 --- a/gtsam/linear/LossFunctions.h +++ b/gtsam/linear/LossFunctions.h @@ -82,10 +82,6 @@ class GTSAM_EXPORT Base { */ virtual double loss(double distance) const { return 0; }; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - virtual double residual(double distance) const { return loss(distance); }; -#endif - /* * This method is responsible for returning the weight function for a given * amount of error. The weight function is related to the analytic derivative @@ -278,14 +274,6 @@ class GTSAM_EXPORT Welsch : public Base { ar &BOOST_SERIALIZATION_NVP(c_); } }; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// @name Deprecated -/// @{ -// Welsh implements the "Welsch" robust error model (Zhang97ivc) -// This was misspelled in previous versions of gtsam and should be -// removed in the future. -using Welsh = Welsch; -#endif /// GemanMcClure implements the "Geman-McClure" robust error model /// (Zhang97ivc). 
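// Editor's illustrative sketch (not part of this patch): with the deprecated `Welsh` alias removed, robust noise models spell the Welsch m-estimator explicitly. Assumes <gtsam/linear/NoiseModel.h> and namespace gtsam; the constants are arbitrary example values.
auto base = noiseModel::Isotropic::Sigma(3, 0.1);
auto robust = noiseModel::Robust::Create(
    noiseModel::mEstimator::Welsch::Create(2.985), base);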
diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index f5ec95696..cf10cf115 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -376,17 +376,6 @@ Vector Constrained::whiten(const Vector& v) const { return c; } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/* ************************************************************************* */ -double Constrained::error(const Vector& v) const { - Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements - for (size_t i=0; i& A, Vector& b) const = 0; virtual void WhitenSystem(Matrix& A, Vector& b) const = 0; virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const = 0; @@ -226,19 +217,6 @@ namespace gtsam { Vector whiten(const Vector& v) const override; Vector unwhiten(const Vector& v) const override; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - virtual double Mahalanobis(const Vector& v) const { - return squaredMahalanobisDistance(v); - } - - /** - * error value 0.5 * v'*R'*R*v - */ - inline double error(const Vector& v) const override { - return 0.5 * squaredMahalanobisDistance(v); - } -#endif - /** * Multiply a derivative with R (derivative of whiten) * Equivalent to whitening each column of the input matrix. @@ -483,15 +461,6 @@ namespace gtsam { return MixedVariances(precisions.array().inverse()); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /** - * The error function for a constrained noisemodel, - * for non-constrained versions, uses sigmas, otherwise - * uses the penalty function with mu - */ - double error(const Vector& v) const override; -#endif - double squaredMahalanobisDistance(const Vector& v) const override; /** Fully constrained variations */ @@ -720,14 +689,6 @@ namespace gtsam { { Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; } inline Vector unwhiten(const Vector& /*v*/) const override { throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - inline double distance(const Vector& v) override { - return robust_->loss(this->unweightedWhiten(v).norm()); - } - // Fold the use of the m-estimator loss(...) function into error(...) 
- inline double error(const Vector& v) const override - { return robust_->loss(noise_->mahalanobisDistance(v)); } -#endif double loss(const double squared_distance) const override { return robust_->loss(std::sqrt(squared_distance)); diff --git a/gtsam/linear/Preconditioner.cpp b/gtsam/linear/Preconditioner.cpp index 1c602a963..24a42afb8 100644 --- a/gtsam/linear/Preconditioner.cpp +++ b/gtsam/linear/Preconditioner.cpp @@ -145,7 +145,7 @@ void BlockJacobiPreconditioner::build( /* getting the block diagonals over the factors */ std::map hessianMap =gfg.hessianBlockDiagonal(); - for ( const Matrix hessian: hessianMap | boost::adaptors::map_values) + for (const Matrix& hessian: hessianMap | boost::adaptors::map_values) blocks.push_back(hessian); /* if necessary, allocating the memory for cacheing the factorization results */ diff --git a/gtsam/linear/Sampler.cpp b/gtsam/linear/Sampler.cpp index 16c7e73e0..4957dfa14 100644 --- a/gtsam/linear/Sampler.cpp +++ b/gtsam/linear/Sampler.cpp @@ -54,17 +54,6 @@ Vector Sampler::sample() const { return sampleDiagonal(sigmas); } -/* ************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -Sampler::Sampler(uint_fast64_t seed) : generator_(seed) {} - -Vector Sampler::sampleNewModel( - const noiseModel::Diagonal::shared_ptr& model) const { - assert(model.get()); - const Vector& sigmas = model->sigmas(); - return sampleDiagonal(sigmas); -} -#endif /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/linear/Sampler.h b/gtsam/linear/Sampler.h index 54c240a2b..bb5098f34 100644 --- a/gtsam/linear/Sampler.h +++ b/gtsam/linear/Sampler.h @@ -84,14 +84,6 @@ class GTSAM_EXPORT Sampler { /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - explicit Sampler(uint_fast64_t seed = 42u); - Vector sampleNewModel(const noiseModel::Diagonal::shared_ptr& model) const; - /// @} -#endif - protected: /** given sigmas for a diagonal model, returns a sample */ Vector sampleDiagonal(const Vector& sigmas) const; diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp index 56b843e8d..f49f9a135 100644 --- a/gtsam/linear/SubgraphSolver.cpp +++ b/gtsam/linear/SubgraphSolver.cpp @@ -65,23 +65,6 @@ SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, : SubgraphSolver(Ab1.eliminateSequential(ordering, EliminateQR), Ab2, parameters) {} -/**************************************************************************************************/ -// deprecated variants -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, - const GaussianFactorGraph &Ab2, - const Parameters ¶meters) - : SubgraphSolver(Rc1, boost::make_shared(Ab2), - parameters) {} - -SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, - const GaussianFactorGraph &Ab2, - const Parameters ¶meters, - const Ordering &ordering) - : SubgraphSolver(Ab1, boost::make_shared(Ab2), - parameters, ordering) {} -#endif - /**************************************************************************************************/ VectorValues SubgraphSolver::optimize() const { VectorValues ybar = conjugateGradients &A, - const Parameters ¶meters, const Ordering &ordering) - : SubgraphSolver(*A, parameters, ordering) {} - SubgraphSolver(const GaussianFactorGraph &, const GaussianFactorGraph &, - const Parameters &, const Ordering &); - SubgraphSolver(const boost::shared_ptr &Ab1, - const boost::shared_ptr 
&Ab2, - const Parameters ¶meters, const Ordering &ordering) - : SubgraphSolver(*Ab1, Ab2, parameters, ordering) {} - SubgraphSolver(const boost::shared_ptr &, - const GaussianFactorGraph &, const Parameters &); - /// @} -#endif }; } // namespace gtsam diff --git a/gtsam/linear/VectorValues.cpp b/gtsam/linear/VectorValues.cpp index 0a25617e4..9b5868744 100644 --- a/gtsam/linear/VectorValues.cpp +++ b/gtsam/linear/VectorValues.cpp @@ -45,7 +45,7 @@ namespace gtsam { /* ************************************************************************* */ VectorValues::VectorValues(const Vector& x, const Dims& dims) { - typedef pair Pair; + using Pair = pair; size_t j = 0; for (const Pair& v : dims) { Key key; diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index 488368c72..eafefb3cb 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -137,7 +137,7 @@ TEST( GaussianBayesNet, optimize3 ) } /* ************************************************************************* */ -TEST(GaussianBayesNet, ordering) +TEST(GaussianBayesNet, ordering) { Ordering expected; expected += _x_, _y_; @@ -155,7 +155,7 @@ TEST( GaussianBayesNet, MatrixStress ) bn.emplace_shared(_z_, Vector2(5, 6), 6 * I_2x2); const VectorValues expected = bn.optimize(); - for (const auto keys : + for (const auto& keys : {KeyVector({_x_, _y_, _z_}), KeyVector({_x_, _z_, _y_}), KeyVector({_y_, _x_, _z_}), KeyVector({_y_, _z_, _x_}), KeyVector({_z_, _x_, _y_}), KeyVector({_z_, _y_, _x_})}) { @@ -183,7 +183,7 @@ TEST( GaussianBayesNet, backSubstituteTranspose ) VectorValues actual = smallBayesNet.backSubstituteTranspose(x); EXPECT(assert_equal(expected, actual)); - + const auto ordering = noisyBayesNet.ordering(); const Matrix R = smallBayesNet.matrix(ordering).first; const Vector expected_vector = R.transpose().inverse() * x.vector(ordering); @@ -206,7 +206,7 @@ TEST( GaussianBayesNet, backSubstituteTransposeNoisy ) VectorValues actual = noisyBayesNet.backSubstituteTranspose(x); EXPECT(assert_equal(expected, actual)); - + const auto ordering = noisyBayesNet.ordering(); const Matrix R = noisyBayesNet.matrix(ordering).first; const Vector expected_vector = R.transpose().inverse() * x.vector(ordering); diff --git a/gtsam/navigation/AHRSFactor.h b/gtsam/navigation/AHRSFactor.h index c3765b8cf..ec1a07f65 100644 --- a/gtsam/navigation/AHRSFactor.h +++ b/gtsam/navigation/AHRSFactor.h @@ -75,7 +75,7 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation biasHat_(bias_hat), preintMeasCov_(preint_meas_cov) {} - const Params& p() const { return *boost::static_pointer_cast(p_);} + Params& p() const { return *boost::static_pointer_cast(p_);} const Vector3& biasHat() const { return biasHat_; } const Matrix3& preintMeasCov() const { return preintMeasCov_; } diff --git a/gtsam/navigation/CombinedImuFactor.cpp b/gtsam/navigation/CombinedImuFactor.cpp index d7b4b7bf1..e41a8de44 100644 --- a/gtsam/navigation/CombinedImuFactor.cpp +++ b/gtsam/navigation/CombinedImuFactor.cpp @@ -148,29 +148,6 @@ void PreintegratedCombinedMeasurements::integrateMeasurement( preintMeasCov_ = F * preintMeasCov_ * F.transpose() + G_measCov_Gt; } -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -PreintegratedCombinedMeasurements::PreintegratedCombinedMeasurements( - const imuBias::ConstantBias& biasHat, const Matrix3& measuredAccCovariance, - const Matrix3& 
measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, const Matrix3& biasAccCovariance, - const Matrix3& biasOmegaCovariance, const Matrix6& biasAccOmegaInt, - const bool use2ndOrderIntegration) { - if (!use2ndOrderIntegration) - throw("PreintegratedImuMeasurements no longer supports first-order integration: it incorrectly compensated for gravity"); - biasHat_ = biasHat; - boost::shared_ptr p = Params::MakeSharedD(); - p->gyroscopeCovariance = measuredOmegaCovariance; - p->accelerometerCovariance = measuredAccCovariance; - p->integrationCovariance = integrationErrorCovariance; - p->biasAccCovariance = biasAccCovariance; - p->biasOmegaCovariance = biasOmegaCovariance; - p->biasAccOmegaInt = biasAccOmegaInt; - p_ = p; - resetIntegration(); - preintMeasCov_.setZero(); -} -#endif //------------------------------------------------------------------------------ // CombinedImuFactor methods //------------------------------------------------------------------------------ @@ -190,10 +167,11 @@ gtsam::NonlinearFactor::shared_ptr CombinedImuFactor::clone() const { //------------------------------------------------------------------------------ void CombinedImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "CombinedImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) << "," - << keyFormatter(this->key6()) << ")\n"; + cout << (s == "" ? s : s + "\n") << "CombinedImuFactor(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << "," << keyFormatter(this->key4()) << "," + << keyFormatter(this->key5()) << "," << keyFormatter(this->key6()) + << ")\n"; _PIM_.print(" preintegrated measurements:"); this->noiseModel_->print(" noise model: "); } @@ -275,41 +253,6 @@ std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) { os << " noise model sigmas: " << f.noiseModel_->sigmas().transpose(); return os; } - -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -CombinedImuFactor::CombinedImuFactor( - Key pose_i, Key vel_i, Key pose_j, Key vel_j, Key bias_i, Key bias_j, - const CombinedPreintegratedMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const boost::optional& body_P_sensor, - const bool use2ndOrderCoriolis) -: Base(noiseModel::Gaussian::Covariance(pim.preintMeasCov_), pose_i, vel_i, - pose_j, vel_j, bias_i, bias_j), -_PIM_(pim) { - using P = CombinedPreintegratedMeasurements::Params; - auto p = boost::allocate_shared

<P>(Eigen::aligned_allocator<P>

(), pim.p()); - p->n_gravity = n_gravity; - p->omegaCoriolis = omegaCoriolis; - p->body_P_sensor = body_P_sensor; - p->use2ndOrderCoriolis = use2ndOrderCoriolis; - _PIM_.p_ = p; -} - -void CombinedImuFactor::Predict(const Pose3& pose_i, const Vector3& vel_i, - Pose3& pose_j, Vector3& vel_j, - const imuBias::ConstantBias& bias_i, - CombinedPreintegratedMeasurements& pim, - const Vector3& n_gravity, - const Vector3& omegaCoriolis, - const bool use2ndOrderCoriolis) { - // use deprecated predict - PoseVelocityBias pvb = pim.predict(pose_i, vel_i, bias_i, n_gravity, - omegaCoriolis, use2ndOrderCoriolis); - pose_j = pvb.pose; - vel_j = pvb.velocity; -} -#endif - } /// namespace gtsam diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 6181f3d0d..387353136 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -220,17 +220,6 @@ public: /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// deprecated constructor - /// NOTE(frank): assumes Z-Down convention, only second order integration supported - PreintegratedCombinedMeasurements(const imuBias::ConstantBias& biasHat, - const Matrix3& measuredAccCovariance, - const Matrix3& measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, - const Matrix3& biasAccCovariance, const Matrix3& biasOmegaCovariance, - const Matrix6& biasAccOmegaInt, const bool use2ndOrderIntegration = true); -#endif - private: /// Serialization function friend class boost::serialization::access; @@ -314,8 +303,8 @@ public: GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const CombinedImuFactor&); /// print - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /// equals bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; @@ -338,26 +327,7 @@ public: boost::optional H4 = boost::none, boost::optional H5 = boost::none, boost::optional H6 = boost::none) const override; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @deprecated typename - typedef gtsam::PreintegratedCombinedMeasurements CombinedPreintegratedMeasurements; - - /// @deprecated constructor - CombinedImuFactor(Key pose_i, Key vel_i, Key pose_j, Key vel_j, Key bias_i, - Key bias_j, const CombinedPreintegratedMeasurements& pim, - const Vector3& n_gravity, const Vector3& omegaCoriolis, - const boost::optional& body_P_sensor = boost::none, - const bool use2ndOrderCoriolis = false); - - // @deprecated use PreintegrationBase::predict - static void Predict(const Pose3& pose_i, const Vector3& vel_i, Pose3& pose_j, - Vector3& vel_j, const imuBias::ConstantBias& bias_i, - CombinedPreintegratedMeasurements& pim, - const Vector3& n_gravity, const Vector3& omegaCoriolis, - const bool use2ndOrderCoriolis = false); -#endif - -private: + private: /** Serialization function */ friend class boost::serialization::access; template diff --git a/gtsam/navigation/ImuFactor.cpp b/gtsam/navigation/ImuFactor.cpp index 582d96acb..cebddf05d 100644 --- a/gtsam/navigation/ImuFactor.cpp +++ b/gtsam/navigation/ImuFactor.cpp @@ -106,32 +106,6 @@ void PreintegratedImuMeasurements::mergeWith(const PreintegratedImuMeasurements& preintMeasCov_ = P + *H2 * pim12.preintMeasCov_ * H2->transpose(); } #endif -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 
-PreintegratedImuMeasurements::PreintegratedImuMeasurements( - const imuBias::ConstantBias& biasHat, const Matrix3& measuredAccCovariance, - const Matrix3& measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, bool use2ndOrderIntegration) { - if (!use2ndOrderIntegration) - throw("PreintegratedImuMeasurements no longer supports first-order integration: it incorrectly compensated for gravity"); - biasHat_ = biasHat; - boost::shared_ptr p = Params::MakeSharedD(); - p->gyroscopeCovariance = measuredOmegaCovariance; - p->accelerometerCovariance = measuredAccCovariance; - p->integrationCovariance = integrationErrorCovariance; - p_ = p; - resetIntegration(); -} - -void PreintegratedImuMeasurements::integrateMeasurement( - const Vector3& measuredAcc, const Vector3& measuredOmega, double deltaT, - boost::optional body_P_sensor) { - // modify parameters to accommodate deprecated method:-( - p_->body_P_sensor = body_P_sensor; - integrateMeasurement(measuredAcc, measuredOmega, deltaT); -} -#endif - //------------------------------------------------------------------------------ // ImuFactor methods //------------------------------------------------------------------------------ @@ -156,10 +130,10 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor& f) { //------------------------------------------------------------------------------ void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) - << ")\n"; + cout << (s == "" ? s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1()) + << "," << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) + << "," << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) + << ")\n"; cout << *this << endl; } @@ -218,43 +192,15 @@ ImuFactor::shared_ptr ImuFactor::Merge(const shared_ptr& f01, // return new factor auto pim02 = Merge(f01->preintegratedMeasurements(), f12->preintegratedMeasurements()); - return boost::make_shared(f01->key1(),// P0 - f01->key2(),// V0 - f12->key3(),// P2 - f12->key4(),// V2 - f01->key5(),// B + return boost::make_shared(f01->key1(), // P0 + f01->key2(), // V0 + f12->key3(), // P2 + f12->key4(), // V2 + f01->key5(), // B pim02); } #endif -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -ImuFactor::ImuFactor(Key pose_i, Key vel_i, Key pose_j, Key vel_j, Key bias, - const PreintegratedImuMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const boost::optional& body_P_sensor, - const bool use2ndOrderCoriolis) : -Base(noiseModel::Gaussian::Covariance(pim.preintMeasCov_), pose_i, vel_i, - pose_j, vel_j, bias), _PIM_(pim) { - boost::shared_ptr p = boost::make_shared< - PreintegrationParams>(pim.p()); - p->n_gravity = n_gravity; - p->omegaCoriolis = omegaCoriolis; - p->body_P_sensor = body_P_sensor; - p->use2ndOrderCoriolis = use2ndOrderCoriolis; - _PIM_.p_ = p; -} - -void ImuFactor::Predict(const Pose3& pose_i, const Vector3& vel_i, - Pose3& pose_j, Vector3& vel_j, const imuBias::ConstantBias& bias_i, - PreintegratedImuMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis) { - // use deprecated predict - PoseVelocityBias pvb = pim.predict(pose_i, vel_i, bias_i, n_gravity, - omegaCoriolis, use2ndOrderCoriolis); - pose_j 
= pvb.pose; - vel_j = pvb.velocity; -} -#endif //------------------------------------------------------------------------------ // ImuFactor2 methods //------------------------------------------------------------------------------ @@ -280,9 +226,9 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor2& f) { //------------------------------------------------------------------------------ void ImuFactor2::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor2(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) - << ")\n"; + cout << (s == "" ? s : s + "\n") << "ImuFactor2(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << ")\n"; cout << *this << endl; } diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index 65142af24..51df3f24a 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -140,24 +140,7 @@ public: void mergeWith(const PreintegratedImuMeasurements& pim, Matrix9* H1, Matrix9* H2); #endif -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @deprecated constructor - /// NOTE(frank): assumes Z-Down convention, only second order integration supported - PreintegratedImuMeasurements(const imuBias::ConstantBias& biasHat, - const Matrix3& measuredAccCovariance, - const Matrix3& measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, - bool use2ndOrderIntegration = true); - - /// @deprecated version of integrateMeasurement with body_P_sensor - /// Use parameters instead - void integrateMeasurement(const Vector3& measuredAcc, - const Vector3& measuredOmega, double dt, - boost::optional body_P_sensor); -#endif - -private: - + private: /// Serialization function friend class boost::serialization::access; template @@ -222,8 +205,8 @@ public: /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const ImuFactor&); - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// @} @@ -253,27 +236,7 @@ public: static shared_ptr Merge(const shared_ptr& f01, const shared_ptr& f12); #endif -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @deprecated typename - typedef PreintegratedImuMeasurements PreintegratedMeasurements; - - /// @deprecated constructor, in the new one gravity, coriolis settings are in PreintegrationParams - ImuFactor(Key pose_i, Key vel_i, Key pose_j, Key vel_j, Key bias, - const PreintegratedMeasurements& preintegratedMeasurements, - const Vector3& n_gravity, const Vector3& omegaCoriolis, - const boost::optional& body_P_sensor = boost::none, - const bool use2ndOrderCoriolis = false); - - /// @deprecated use PreintegrationBase::predict, - /// in the new one gravity, coriolis settings are in PreintegrationParams - static void Predict(const Pose3& pose_i, const Vector3& vel_i, Pose3& pose_j, - Vector3& vel_j, const imuBias::ConstantBias& bias_i, - PreintegratedMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false); -#endif - -private: - + private: /** Serialization function */ friend class boost::serialization::access; template @@ -320,8 +283,8 @@ public: /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& 
os, const ImuFactor2&); - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// @} diff --git a/gtsam/navigation/MagFactor.h b/gtsam/navigation/MagFactor.h index 97a4c70ce..74e9177d5 100644 --- a/gtsam/navigation/MagFactor.h +++ b/gtsam/navigation/MagFactor.h @@ -73,7 +73,7 @@ public: * @brief vector of errors */ Vector evaluateError(const Rot2& nRb, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { // measured bM = nRb� * nM + b Point3 hx = unrotate(nRb, nM_, H) + bias_; return (hx - measured_); @@ -111,7 +111,7 @@ public: * @brief vector of errors */ Vector evaluateError(const Rot3& nRb, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { // measured bM = nRb� * nM + b Point3 hx = nRb.unrotate(nM_, H, boost::none) + bias_; return (hx - measured_); @@ -150,7 +150,7 @@ public: */ Vector evaluateError(const Point3& nM, const Point3& bias, boost::optional H1 = boost::none, boost::optional H2 = - boost::none) const { + boost::none) const override { // measured bM = nRb� * nM + b, where b is unknown bias Point3 hx = bRn_.rotate(nM, boost::none, H1) + bias; if (H2) @@ -192,7 +192,7 @@ public: Vector evaluateError(const double& scale, const Unit3& direction, const Point3& bias, boost::optional H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = - boost::none) const { + boost::none) const override { // measured bM = nRb� * nM + b, where b is unknown bias Unit3 rotated = bRn_.rotate(direction, boost::none, H2); Point3 hx = scale * rotated.point3() + bias; diff --git a/gtsam/navigation/NavState.cpp b/gtsam/navigation/NavState.cpp index 50949c761..1d191416f 100644 --- a/gtsam/navigation/NavState.cpp +++ b/gtsam/navigation/NavState.cpp @@ -88,15 +88,15 @@ Matrix7 NavState::matrix() const { //------------------------------------------------------------------------------ ostream& operator<<(ostream& os, const NavState& state) { - os << "R:" << state.attitude(); - os << "p:" << state.position() << endl; - os << "v:" << Point3(state.velocity()) << endl; + os << "R: " << state.attitude() << "\n"; + os << "p: " << state.position() << "\n"; + os << "v: " << Point3(state.velocity()); return os; } //------------------------------------------------------------------------------ void NavState::print(const string& s) const { - cout << s << *this << endl; + cout << (s.empty() ? s : s + " ") << *this << endl; } //------------------------------------------------------------------------------ diff --git a/gtsam/navigation/PreintegratedRotation.cpp b/gtsam/navigation/PreintegratedRotation.cpp index c5d48b734..f827c7c59 100644 --- a/gtsam/navigation/PreintegratedRotation.cpp +++ b/gtsam/navigation/PreintegratedRotation.cpp @@ -26,12 +26,11 @@ using namespace std; namespace gtsam { void PreintegratedRotationParams::print(const string& s) const { - cout << s << endl; + cout << (s == "" ? 
s : s + "\n") << endl; cout << "gyroscopeCovariance:\n[\n" << gyroscopeCovariance << "\n]" << endl; if (omegaCoriolis) cout << "omegaCoriolis = (" << omegaCoriolis->transpose() << ")" << endl; - if (body_P_sensor) - body_P_sensor->print("body_P_sensor"); + if (body_P_sensor) body_P_sensor->print("body_P_sensor"); } bool PreintegratedRotationParams::equals( diff --git a/gtsam/navigation/PreintegrationBase.cpp b/gtsam/navigation/PreintegrationBase.cpp index 4af752ac0..111594663 100644 --- a/gtsam/navigation/PreintegrationBase.cpp +++ b/gtsam/navigation/PreintegrationBase.cpp @@ -46,7 +46,7 @@ ostream& operator<<(ostream& os, const PreintegrationBase& pim) { //------------------------------------------------------------------------------ void PreintegrationBase::print(const string& s) const { - cout << s << *this << endl; + cout << (s == "" ? s : s + "\n") << *this << endl; } //------------------------------------------------------------------------------ @@ -193,22 +193,6 @@ Vector9 PreintegrationBase::computeErrorAndJacobians(const Pose3& pose_i, return error; } -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -PoseVelocityBias PreintegrationBase::predict(const Pose3& pose_i, - const Vector3& vel_i, const imuBias::ConstantBias& bias_i, - const Vector3& n_gravity, const Vector3& omegaCoriolis, - const bool use2ndOrderCoriolis) const { -// NOTE(frank): parameters are supposed to be constant, below is only provided for compatibility - boost::shared_ptr q = boost::make_shared(p()); - q->n_gravity = n_gravity; - q->omegaCoriolis = omegaCoriolis; - q->use2ndOrderCoriolis = use2ndOrderCoriolis; - p_ = q; - return PoseVelocityBias(predict(NavState(pose_i, vel_i), bias_i), bias_i); -} - -#endif //------------------------------------------------------------------------------ } // namespace gtsam diff --git a/gtsam/navigation/PreintegrationBase.h b/gtsam/navigation/PreintegrationBase.h index 29d7814b5..e118a6232 100644 --- a/gtsam/navigation/PreintegrationBase.h +++ b/gtsam/navigation/PreintegrationBase.h @@ -32,25 +32,6 @@ namespace gtsam { -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// @deprecated -struct PoseVelocityBias { - Pose3 pose; - Vector3 velocity; - imuBias::ConstantBias bias; - PoseVelocityBias(const Pose3& _pose, const Vector3& _velocity, - const imuBias::ConstantBias _bias) : - pose(_pose), velocity(_velocity), bias(_bias) { - } - PoseVelocityBias(const NavState& navState, const imuBias::ConstantBias _bias) : - pose(navState.pose()), velocity(navState.velocity()), bias(_bias) { - } - NavState navState() const { - return NavState(pose, velocity); - } -}; -#endif - /** * PreintegrationBase is the base class for PreintegratedMeasurements * (in ImuFactor) and CombinedPreintegratedMeasurements (in CombinedImuFactor). @@ -63,11 +44,6 @@ class GTSAM_EXPORT PreintegrationBase { typedef PreintegrationParams Params; protected: - /// Parameters. Declared mutable only for deprecated predict method. 
- /// TODO(frank): make const once deprecated method is removed -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - mutable -#endif boost::shared_ptr p_; /// Acceleration and gyro bias used for preintegration @@ -117,16 +93,11 @@ class GTSAM_EXPORT PreintegrationBase { } /// const reference to params - const Params& p() const { - return *boost::static_pointer_cast(p_); + Params& p() const { + return *p_; } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - void set_body_P_sensor(const Pose3& body_P_sensor) { - p_->body_P_sensor = body_P_sensor; - } -#endif -/// @} + /// @} /// @name Instance variables access /// @{ @@ -145,7 +116,7 @@ class GTSAM_EXPORT PreintegrationBase { /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const PreintegrationBase& pim); - virtual void print(const std::string& s) const; + virtual void print(const std::string& s="") const; /// @} /// @name Main functionality @@ -201,18 +172,6 @@ class GTSAM_EXPORT PreintegrationBase { OptionalJacobian<9, 6> H3 = boost::none, OptionalJacobian<9, 3> H4 = boost::none, OptionalJacobian<9, 6> H5 = boost::none) const; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - - /// @deprecated predict - PoseVelocityBias predict(const Pose3& pose_i, const Vector3& vel_i, - const imuBias::ConstantBias& bias_i, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false) const; - - /// @} -#endif - private: /** Serialization function */ friend class boost::serialization::access; diff --git a/gtsam/navigation/ScenarioRunner.h b/gtsam/navigation/ScenarioRunner.h index 649d0fe12..1577e36fe 100644 --- a/gtsam/navigation/ScenarioRunner.h +++ b/gtsam/navigation/ScenarioRunner.h @@ -104,15 +104,6 @@ class GTSAM_EXPORT ScenarioRunner { /// Estimate covariance of sampled noise for sanity-check Matrix6 estimateNoiseCovariance(size_t N = 1000) const; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - ScenarioRunner(const Scenario* scenario, const SharedParams& p, - double imuSampleTime = 1.0 / 100.0, const Bias& bias = Bias()) - : ScenarioRunner(*scenario, p, imuSampleTime, bias) {} - /// @} -#endif }; } // namespace gtsam diff --git a/gtsam/navigation/tests/testNavState.cpp b/gtsam/navigation/tests/testNavState.cpp index 8ea8ce9b5..c07240752 100644 --- a/gtsam/navigation/tests/testNavState.cpp +++ b/gtsam/navigation/tests/testNavState.cpp @@ -163,37 +163,6 @@ TEST( NavState, Manifold ) { EXPECT(assert_equal(numericalDerivative22(local, state2, kIdentity), aH2)); } -/* ************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -TEST(NavState, Update) { - Vector3 omega(M_PI / 100.0, 0.0, 0.0); - Vector3 acc(0.1, 0.0, 0.0); - double dt = 10; - Matrix9 aF; - Matrix93 aG1, aG2; - boost::function update = - boost::bind(&NavState::update, _1, _2, _3, dt, boost::none, - boost::none, boost::none); - Vector3 b_acc = kAttitude * acc; - NavState expected(kAttitude.expmap(dt * omega), - kPosition + Point3((kVelocity + b_acc * dt / 2) * dt), - kVelocity + b_acc * dt); - NavState actual = kState1.update(acc, omega, dt, aF, aG1, aG2); - EXPECT(assert_equal(expected, actual)); - EXPECT(assert_equal(numericalDerivative31(update, kState1, acc, omega, 1e-7), aF, 1e-7)); - EXPECT(assert_equal(numericalDerivative32(update, kState1, acc, omega, 1e-7), aG1, 1e-7)); - EXPECT(assert_equal(numericalDerivative33(update, kState1, acc, omega, 1e-7), aG2, 1e-7)); - - // Try different values - omega = Vector3(0.1, 0.2, 
0.3); - acc = Vector3(0.4, 0.5, 0.6); - kState1.update(acc, omega, dt, aF, aG1, aG2); - EXPECT(assert_equal(numericalDerivative31(update, kState1, acc, omega, 1e-7), aF, 1e-7)); - EXPECT(assert_equal(numericalDerivative32(update, kState1, acc, omega, 1e-7), aG1, 1e-7)); - EXPECT(assert_equal(numericalDerivative33(update, kState1, acc, omega, 1e-7), aG2, 1e-7)); -} -#endif - /* ************************************************************************* */ static const double dt = 2.0; boost::function coriolis = boost::bind( @@ -237,6 +206,25 @@ TEST(NavState, CorrectPIM) { EXPECT(assert_equal(numericalDerivative22(correctPIM, kState1, xi), aH2)); } +/* ************************************************************************* */ +TEST(NavState, Stream) +{ + NavState state; + + std::ostringstream os; + os << state; + + string expected; +#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS + expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\np: 0\n0\n0\nv: 0\n0\n0"; +#else + expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\np: [0, 0, 0]'\nv: [0, 0, 0]'"; +#endif + + EXPECT(os.str() == expected); +} + + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/navigation/tests/testPoseVelocityBias.cpp b/gtsam/navigation/tests/testPoseVelocityBias.cpp index 0b897bc6e..f3c36335b 100644 --- a/gtsam/navigation/tests/testPoseVelocityBias.cpp +++ b/gtsam/navigation/tests/testPoseVelocityBias.cpp @@ -25,40 +25,6 @@ using namespace std; using namespace gtsam; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - -// Should be seen as between(pvb1,pvb2), i.e., written as pvb2 \omin pvb1 -Vector9 error(const PoseVelocityBias& pvb1, const PoseVelocityBias& pvb2) { - Matrix3 R1 = pvb1.pose.rotation().matrix(); - // Ri.transpose() translate the error from the global frame into pose1's frame - const Vector3 fp = R1.transpose() * (pvb2.pose.translation() - pvb1.pose.translation()); - const Vector3 fv = R1.transpose() * (pvb2.velocity - pvb1.velocity); - const Rot3 R1BetweenR2 = pvb1.pose.rotation().between(pvb2.pose.rotation()); - const Vector3 fR = Rot3::Logmap(R1BetweenR2); - Vector9 r; - r << fp, fv, fR; - return r; -} - -/* ************************************************************************************************/ -TEST(PoseVelocityBias, error) { - Point3 i1(0, 1, 0), j1(-1, 0, 0), k1(0, 0, 1); - Pose3 x1(Rot3(i1, j1, k1), Point3(5.0, 1.0, 0.0)); - Vector3 v1(Vector3(0.5, 0.0, 0.0)); - imuBias::ConstantBias bias1(Vector3(0.2, 0, 0), Vector3(0.1, 0, 0.3)); - - Pose3 x2(Rot3(i1, j1, k1).expmap(Vector3(0.1, 0.2, 0.3)), Point3(5.5, 1.0, 6.0)); - Vector3 v2(Vector3(0.5, 4.0, 3.0)); - imuBias::ConstantBias bias2(Vector3(0.1, 0.2, -0.3), Vector3(0.2, 0.3, 0.1)); - - PoseVelocityBias pvb1(x1, v1, bias1), pvb2(x2, v2, bias2); - - Vector9 expected, actual = error(pvb1, pvb2); - expected << 0.0, -0.5, 6.0, 4.0, 0.0, 3.0, 0.1, 0.2, 0.3; - EXPECT(assert_equal(expected, actual, 1e-9)); -} -#endif - /* ************************************************************************************************/ int main() { TestResult tr; diff --git a/gtsam/nonlinear/ExpressionFactor.h b/gtsam/nonlinear/ExpressionFactor.h index 4bb8da685..6d6162825 100644 --- a/gtsam/nonlinear/ExpressionFactor.h +++ b/gtsam/nonlinear/ExpressionFactor.h @@ -19,9 +19,11 @@ #pragma once +#include +#include +#include #include #include -#include #include namespace gtsam { @@ -34,6 +36,9 @@ namespace gtsam { * such instances, the user should declare derived classes from this template, * 
implementing expresion(), serialize(), clone(), print(), and defining the * corresponding `struct traits : public Testable {}`. + * + * \tparam T Type for measurements. + * */ template class ExpressionFactor: public NoiseModelFactor { @@ -222,26 +227,98 @@ private: template struct traits > : public Testable > {}; +/** + * N-ary variadic template for ExpressionFactor meant as a base class for N-ary + * factors. Enforces an 'expression' method with N keys. + * Derived class (an N-factor!) needs to call 'initialize'. + * + * Does not provide backward compatible 'evaluateError'. + * + * \tparam T Type for measurements. The rest of template arguments are types + * for the N key-indexed Values. + * + */ +template +class ExpressionFactorN : public ExpressionFactor { +public: + static const std::size_t NARY_EXPRESSION_SIZE = sizeof...(Args); + using ArrayNKeys = std::array; + + /// Destructor + virtual ~ExpressionFactorN() = default; + + // Don't provide backward compatible evaluateVector(), due to its problematic + // variable length of optional Jacobian arguments. Vector evaluateError(const + // Args... args,...); + + /// Recreate expression from given keys_ and measured_, used in load + /// Needed to deserialize a derived factor + virtual Expression expression(const ArrayNKeys &keys) const { + throw std::runtime_error( + "ExpressionFactorN::expression not provided: cannot deserialize."); + } + +protected: + /// Default constructor, for serialization + ExpressionFactorN() = default; + + /// Constructor takes care of keys, but still need to call initialize + ExpressionFactorN(const ArrayNKeys &keys, const SharedNoiseModel &noiseModel, + const T &measurement) + : ExpressionFactor(noiseModel, measurement) { + for (const auto &key : keys) + Factor::keys_.push_back(key); + } + +private: + /// Return an expression that predicts the measurement given Values + Expression expression() const override { + ArrayNKeys keys; + int idx = 0; + for (const auto &key : Factor::keys_) + keys[idx++] = key; + return expression(keys); + } + + friend class boost::serialization::access; + template + void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + ar &boost::serialization::make_nvp( + "ExpressionFactorN", + boost::serialization::base_object>(*this)); + } +}; +/// traits +template +struct traits> + : public Testable> {}; +// ExpressionFactorN + + +#if defined(GTSAM_ALLOW_DEPRECATED_SINCE_V41) /** * Binary specialization of ExpressionFactor meant as a base class for binary - * factors. Enforces an 'expression' method with two keys, and provides 'evaluateError'. - * Derived class (a binary factor!) needs to call 'initialize'. + * factors. Enforces an 'expression' method with two keys, and provides + * 'evaluateError'. Derived class (a binary factor!) needs to call 'initialize'. + * + * \sa ExpressionFactorN + * \deprecated Prefer the more general ExpressionFactorN<>. 
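 * For new code, a minimal derived factor would follow the same pattern as the
 * RangeFactor/BearingFactor changes later in this patch. The sketch below is
 * illustrative only: the class name and double measurement are made up, and it
 * assumes the Range<Pose2, Point2> functor from <gtsam/geometry/Pose2.h>.
 * \code
 * class ExampleRangeFactor : public ExpressionFactorN<double, Pose2, Point2> {
 *   using Base = ExpressionFactorN<double, Pose2, Point2>;
 *  public:
 *   ExampleRangeFactor() {}  // default constructor, for serialization
 *   ExampleRangeFactor(Key poseKey, Key pointKey, double measured,
 *                      const SharedNoiseModel& model)
 *       : Base({poseKey, pointKey}, model, measured) {
 *     // The derived class is responsible for calling initialize().
 *     this->initialize(expression({poseKey, pointKey}));
 *   }
 *   // Recreate the measurement expression from keys (also used on deserialization).
 *   Expression<double> expression(const Base::ArrayNKeys& keys) const override {
 *     Expression<Pose2> pose(keys[0]);
 *     Expression<Point2> point(keys[1]);
 *     return Expression<double>(Range<Pose2, Point2>(), pose, point);
 *   }
 * };
 * \endcode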
*/ template -class ExpressionFactor2 : public ExpressionFactor { - public: +class ExpressionFactor2 : public ExpressionFactorN { +public: /// Destructor - virtual ~ExpressionFactor2() {} + ~ExpressionFactor2() override {} /// Backwards compatible evaluateError, to make existing tests compile - Vector evaluateError(const A1& a1, const A2& a2, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + Vector evaluateError(const A1 &a1, const A2 &a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const { Values values; values.insert(this->keys_[0], a1); values.insert(this->keys_[1], a2); std::vector H(2); - Vector error = this->unwhitenedError(values, H); + Vector error = ExpressionFactor::unwhitenedError(values, H); if (H1) (*H1) = H[0]; if (H2) (*H2) = H[1]; return error; @@ -250,35 +327,25 @@ class ExpressionFactor2 : public ExpressionFactor { /// Recreate expression from given keys_ and measured_, used in load /// Needed to deserialize a derived factor virtual Expression expression(Key key1, Key key2) const { - throw std::runtime_error("ExpressionFactor2::expression not provided: cannot deserialize."); + throw std::runtime_error( + "ExpressionFactor2::expression not provided: cannot deserialize."); + } + virtual Expression + expression(const typename ExpressionFactorN::ArrayNKeys &keys) + const override { + return expression(keys[0], keys[1]); } - protected: +protected: /// Default constructor, for serialization ExpressionFactor2() {} /// Constructor takes care of keys, but still need to call initialize - ExpressionFactor2(Key key1, Key key2, - const SharedNoiseModel& noiseModel, - const T& measurement) - : ExpressionFactor(noiseModel, measurement) { - this->keys_.push_back(key1); - this->keys_.push_back(key2); - } - - private: - /// Return an expression that predicts the measurement given Values - Expression expression() const override { - return expression(this->keys_[0], this->keys_[1]); - } - - friend class boost::serialization::access; - template - void serialize(ARCHIVE& ar, const unsigned int /*version*/) { - ar& boost::serialization::make_nvp( - "ExpressionFactor", boost::serialization::base_object >(*this)); - } + ExpressionFactor2(Key key1, Key key2, const SharedNoiseModel &noiseModel, + const T &measurement) + : ExpressionFactorN({key1, key2}, noiseModel, measurement) {} }; // ExpressionFactor2 +#endif -}// \ namespace gtsam +} // namespace gtsam diff --git a/gtsam/nonlinear/ISAM2Clique.h b/gtsam/nonlinear/ISAM2Clique.h index 467741d66..9b40c02c6 100644 --- a/gtsam/nonlinear/ISAM2Clique.h +++ b/gtsam/nonlinear/ISAM2Clique.h @@ -51,6 +51,7 @@ class GTSAM_EXPORT ISAM2Clique /// Default constructor ISAM2Clique() : Base() {} + virtual ~ISAM2Clique() = default; /// Copy constructor, does *not* copy solution pointers as these are invalid /// in different trees. diff --git a/gtsam/nonlinear/LinearContainerFactor.h b/gtsam/nonlinear/LinearContainerFactor.h index 4540117b6..8587e6b91 100644 --- a/gtsam/nonlinear/LinearContainerFactor.h +++ b/gtsam/nonlinear/LinearContainerFactor.h @@ -140,21 +140,14 @@ public: * Utility function for converting linear graphs to nonlinear graphs * consisting of LinearContainerFactors. 
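 * A possible call site, as a sketch (assumes an existing GaussianFactorGraph
 * `linearGraph` and its linearization point `linPoint`; both names are illustrative):
 * \code
 * NonlinearFactorGraph nonlinear =
 *     LinearContainerFactor::ConvertLinearGraph(linearGraph, linPoint);
 * \endcode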
*/ - GTSAM_EXPORT static NonlinearFactorGraph ConvertLinearGraph(const GaussianFactorGraph& linear_graph, + GTSAM_EXPORT + static NonlinearFactorGraph ConvertLinearGraph(const GaussianFactorGraph& linear_graph, const Values& linearizationPoint = Values()); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - GTSAM_EXPORT static NonlinearFactorGraph convertLinearGraph(const GaussianFactorGraph& linear_graph, - const Values& linearizationPoint = Values()) { - return ConvertLinearGraph(linear_graph, linearizationPoint); - } -#endif - -protected: - GTSAM_EXPORT void initializeLinearizationPoint(const Values& linearizationPoint); - -private: + protected: + GTSAM_EXPORT void initializeLinearizationPoint(const Values& linearizationPoint); + private: /** Serialization function */ friend class boost::serialization::access; template diff --git a/gtsam/nonlinear/NonlinearFactor.h b/gtsam/nonlinear/NonlinearFactor.h index 7bbd51236..80fbfbb11 100644 --- a/gtsam/nonlinear/NonlinearFactor.h +++ b/gtsam/nonlinear/NonlinearFactor.h @@ -29,13 +29,6 @@ #include #include -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -#define ADD_CLONE_NONLINEAR_FACTOR(Derived) \ - gtsam::NonlinearFactor::shared_ptr clone() const override { \ - return boost::static_pointer_cast( \ - gtsam::NonlinearFactor::shared_ptr(new Derived(*this))); } -#endif - namespace gtsam { using boost::assign::cref_list_of; @@ -251,21 +244,13 @@ public: */ boost::shared_ptr linearize(const Values& x) const override; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - SharedNoiseModel get_noiseModel() const { return noiseModel_; } - /// @} -#endif - -private: - + private: /** Serialization function */ friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { ar & boost::serialization::make_nvp("NonlinearFactor", - boost::serialization::base_object(*this)); + boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(noiseModel_); } @@ -296,6 +281,8 @@ protected: typedef NoiseModelFactor1 This; public: + /// @name Constructors + /// @{ /** Default constructor for I/O only */ NoiseModelFactor1() {} @@ -309,16 +296,23 @@ public: * @param noiseModel shared pointer to noise model * @param key1 by which to look up X value in Values */ - NoiseModelFactor1(const SharedNoiseModel& noiseModel, Key key1) : - Base(noiseModel, cref_list_of<1>(key1)) {} + NoiseModelFactor1(const SharedNoiseModel &noiseModel, Key key1) + : Base(noiseModel, cref_list_of<1>(key1)) {} - /** Calls the 1-key specific version of evaluateError, which is pure virtual - * so must be implemented in the derived class. + /// @} + /// @name NoiseModelFactor methods + /// @{ + + /** + * Calls the 1-key specific version of evaluateError below, which is pure + * virtual so must be implemented in the derived class. */ - Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { - if(this->active(x)) { - const X& x1 = x.at(keys_[0]); - if(H) { + Vector unwhitenedError( + const Values &x, + boost::optional &> H = boost::none) const override { + if (this->active(x)) { + const X &x1 = x.at(keys_[0]); + if (H) { return evaluateError(x1, (*H)[0]); } else { return evaluateError(x1); @@ -328,16 +322,22 @@ public: } } + /// @} + /// @name Virtual methods + /// @{ + /** * Override this method to finish implementing a unary factor. * If the optional Matrix reference argument is specified, it should compute * both the function evaluation and its derivative in X. 
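 * As an illustrative sketch only (assuming a derived factor on a Point2 with a
 * stored Point2 member `measured_`), an implementation could read:
 * \code
 * Vector evaluateError(const Point2& x,
 *                      boost::optional<Matrix&> H = boost::none) const override {
 *   if (H) *H = I_2x2;  // Jacobian of the identity measurement function
 *   return (Vector(2) << x.x() - measured_.x(), x.y() - measured_.y()).finished();
 * }
 * \endcode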
*/ - virtual Vector evaluateError(const X& x, boost::optional H = - boost::none) const = 0; + virtual Vector + evaluateError(const X &x, + boost::optional H = boost::none) const = 0; + + /// @} private: - /** Serialization function */ friend class boost::serialization::access; template diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 328a3facf..9a9c487b6 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -90,7 +90,7 @@ void NonlinearOptimizer::defaultOptimize() { // Iterative loop do { // Do next iteration - currentError = error(); + currentError = error(); // TODO(frank): don't do this twice at first !? Computed above! iterate(); tictoc_finishedIteration(); diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 103c231be..9935f44ce 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -105,14 +105,17 @@ public: */ const Values& optimizeSafely(); - /// return error + /// return error in current optimizer state double error() const; - /// return number of iterations + /// return number of iterations in current optimizer state size_t iterations() const; - /// return values - const Values& values() const; + /// return values in current optimizer state + const Values &values() const; + + /// return the graph with nonlinear factors + const NonlinearFactorGraph &graph() const { return graph_; } /// @} diff --git a/gtsam/nonlinear/Values.cpp b/gtsam/nonlinear/Values.cpp index 7e13a072a..b672031ca 100644 --- a/gtsam/nonlinear/Values.cpp +++ b/gtsam/nonlinear/Values.cpp @@ -52,6 +52,12 @@ namespace gtsam { Values::Values(Values&& other) : values_(std::move(other.values_)) { } + /* ************************************************************************* */ + Values::Values(std::initializer_list init) { + for (const auto &kv : init) + insert(kv.key, kv.value); + } + /* ************************************************************************* */ Values::Values(const Values& other, const VectorValues& delta) { for (const_iterator key_value = other.begin(); key_value != other.end(); ++key_value) { @@ -69,7 +75,8 @@ namespace gtsam { /* ************************************************************************* */ void Values::print(const string& str, const KeyFormatter& keyFormatter) const { - cout << str << "Values with " << size() << " values:" << endl; + cout << str << (str == "" ? "" : "\n"); + cout << "Values with " << size() << " values:\n"; for(const_iterator key_value = begin(); key_value != end(); ++key_value) { cout << "Value " << keyFormatter(key_value->key) << ": "; key_value->value.print(""); diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index bc64f2612..b49188b68 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -149,6 +149,13 @@ namespace gtsam { /** Move constructor */ Values(Values&& other); + + /** Constructor from initializer list. 
Example usage: + * \code + * Values v = {{k1, genericValue(pose1)}, {k2, genericValue(point2)}}; + * \endcode + */ + Values(std::initializer_list init); /** Construct from a Values and an update vector: identical to other.retract(delta) */ Values(const Values& other, const VectorValues& delta); @@ -390,7 +397,7 @@ namespace gtsam { template size_t count() const { size_t i = 0; - for (const auto& key_value : *this) { + for (const auto key_value : *this) { if (dynamic_cast*>(&key_value.value)) ++i; } diff --git a/gtsam/nonlinear/tests/testCallRecord.cpp b/gtsam/nonlinear/tests/testCallRecord.cpp index c5ccc0f52..66c56e696 100644 --- a/gtsam/nonlinear/tests/testCallRecord.cpp +++ b/gtsam/nonlinear/tests/testCallRecord.cpp @@ -98,7 +98,8 @@ struct Record: public internal::CallRecordImplementor { friend struct internal::CallRecordImplementor; }; -internal::JacobianMap & NJM= *static_cast(nullptr); +internal::JacobianMap* NJM_ptr = static_cast(nullptr); +internal::JacobianMap & NJM = *NJM_ptr; /* ************************************************************************* */ typedef Eigen::Matrix DynRowMat; diff --git a/gtsam/nonlinear/tests/testFunctorizedFactor.cpp b/gtsam/nonlinear/tests/testFunctorizedFactor.cpp index 12dd6b91c..48ab73ad0 100644 --- a/gtsam/nonlinear/tests/testFunctorizedFactor.cpp +++ b/gtsam/nonlinear/tests/testFunctorizedFactor.cpp @@ -131,7 +131,7 @@ TEST(FunctorizedFactor, Print) { "FunctorizedFactor(X0)\n" " measurement: [\n" " 1, 0;\n" - " 0, 1\n" + " 0, 1\n" "]\n" " noise model sigmas: 1 1 1 1 1 1 1 1 1\n"; diff --git a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp index 2f624f527..09b358efb 100644 --- a/gtsam/nonlinear/tests/testValues.cpp +++ b/gtsam/nonlinear/tests/testValues.cpp @@ -593,7 +593,7 @@ TEST(Values, Demangle) { Values values; Matrix13 v; v << 5.0, 6.0, 7.0; values.insert(key1, v); - string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix) [\n 5, 6, 7\n]\n\n"; + string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix)\n[\n 5, 6, 7\n]\n\n"; stringstream buffer; streambuf * old = cout.rdbuf(buffer.rdbuf()); @@ -606,6 +606,36 @@ TEST(Values, Demangle) { EXPECT(assert_equal(expected, actual)); } +/* ************************************************************************* */ +TEST(Values, brace_initializer) { + const Pose2 poseA(1.0, 2.0, 0.3), poseC(.0, .0, .0); + const Pose3 poseB(Pose2(0.1, 0.2, 0.3)); + + { + Values values; + EXPECT_LONGS_EQUAL(0, values.size()); + values = { {key1, genericValue(1.0)} }; + EXPECT_LONGS_EQUAL(1, values.size()); + CHECK(values.at(key1) == 1.0); + } + { + Values values = { {key1, genericValue(poseA)}, {key2, genericValue(poseB)} }; + EXPECT_LONGS_EQUAL(2, values.size()); + EXPECT(assert_equal(values.at(key1), poseA)); + EXPECT(assert_equal(values.at(key2), poseB)); + } + // Test exception: duplicated key: + { + Values values; + CHECK_EXCEPTION((values = { + {key1, genericValue(poseA)}, + {key2, genericValue(poseB)}, + {key1, genericValue(poseC)} + }), std::exception); + } +} + + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/gtsam/sam/BearingFactor.h b/gtsam/sam/BearingFactor.h index 530fcfdcd..7076708db 100644 --- a/gtsam/sam/BearingFactor.h +++ b/gtsam/sam/BearingFactor.h @@ -34,8 +34,8 @@ struct Bearing; */ template ::result_type> -struct BearingFactor : public ExpressionFactor2 { 
- typedef ExpressionFactor2 Base; +struct BearingFactor : public ExpressionFactorN { + typedef ExpressionFactorN Base; /// default constructor BearingFactor() {} @@ -43,14 +43,14 @@ struct BearingFactor : public ExpressionFactor2 { /// primary constructor BearingFactor(Key key1, Key key2, const T& measured, const SharedNoiseModel& model) - : Base(key1, key2, model, measured) { - this->initialize(expression(key1, key2)); + : Base({key1, key2}, model, measured) { + this->initialize(expression({key1, key2})); } // Return measurement expression - Expression expression(Key key1, Key key2) const override { - Expression a1_(key1); - Expression a2_(key2); + Expression expression(const typename Base::ArrayNKeys &keys) const override { + Expression a1_(keys[0]); + Expression a2_(keys[1]); return Expression(Bearing(), a1_, a2_); } @@ -60,6 +60,21 @@ struct BearingFactor : public ExpressionFactor2 { std::cout << s << "BearingFactor" << std::endl; Base::print(s, kf); } + + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if (H2) *H2 = Hs[1]; + return error; + } + private: friend class boost::serialization::access; diff --git a/gtsam/sam/BearingRangeFactor.h b/gtsam/sam/BearingRangeFactor.h index be645ef94..482dbb974 100644 --- a/gtsam/sam/BearingRangeFactor.h +++ b/gtsam/sam/BearingRangeFactor.h @@ -33,23 +33,30 @@ template ::result_type, typename R = typename Range::result_type> class BearingRangeFactor - : public ExpressionFactor2, A1, A2> { + : public ExpressionFactorN, A1, A2> { private: typedef BearingRange T; - typedef ExpressionFactor2 Base; + typedef ExpressionFactorN Base; typedef BearingRangeFactor This; public: typedef boost::shared_ptr shared_ptr; - /// default constructor + /// Default constructor BearingRangeFactor() {} - /// primary constructor - BearingRangeFactor(Key key1, Key key2, const B& measuredBearing, - const R& measuredRange, const SharedNoiseModel& model) - : Base(key1, key2, model, T(measuredBearing, measuredRange)) { - this->initialize(expression(key1, key2)); + /// Construct from BearingRange instance + BearingRangeFactor(Key key1, Key key2, const T &bearingRange, + const SharedNoiseModel &model) + : Base({{key1, key2}}, model, T(bearingRange)) { + this->initialize(expression({{key1, key2}})); + } + + /// Construct from separate bearing and range + BearingRangeFactor(Key key1, Key key2, const B &measuredBearing, + const R &measuredRange, const SharedNoiseModel &model) + : Base({{key1, key2}}, model, T(measuredBearing, measuredRange)) { + this->initialize(expression({{key1, key2}})); } virtual ~BearingRangeFactor() {} @@ -61,9 +68,23 @@ class BearingRangeFactor } // Return measurement expression - Expression expression(Key key1, Key key2) const override { - return Expression(T::Measure, Expression(key1), - Expression(key2)); + Expression expression(const typename Base::ArrayNKeys& keys) const override { + return Expression(T::Measure, Expression(keys[0]), + Expression(keys[1])); + } + + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if 
(H2) *H2 = Hs[1]; + return error; } /// print diff --git a/gtsam/sam/RangeFactor.h b/gtsam/sam/RangeFactor.h index 80b02404e..d9890d2ef 100644 --- a/gtsam/sam/RangeFactor.h +++ b/gtsam/sam/RangeFactor.h @@ -32,18 +32,18 @@ struct Range; * @addtogroup SAM */ template -class RangeFactor : public ExpressionFactor2 { +class RangeFactor : public ExpressionFactorN { private: typedef RangeFactor This; - typedef ExpressionFactor2 Base; + typedef ExpressionFactorN Base; public: /// default constructor RangeFactor() {} RangeFactor(Key key1, Key key2, T measured, const SharedNoiseModel& model) - : Base(key1, key2, model, measured) { - this->initialize(expression(key1, key2)); + : Base({key1, key2}, model, measured) { + this->initialize(expression({key1, key2})); } /// @return a deep copy of this factor @@ -53,11 +53,25 @@ class RangeFactor : public ExpressionFactor2 { } // Return measurement expression - Expression expression(Key key1, Key key2) const override { - Expression a1_(key1); - Expression a2_(key2); + Expression expression(const typename Base::ArrayNKeys& keys) const override { + Expression a1_(keys[0]); + Expression a2_(keys[1]); return Expression(Range(), a1_, a2_); } + + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = Base::unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if (H2) *H2 = Hs[1]; + return error; + } /// print void print(const std::string& s = "", @@ -86,10 +100,10 @@ struct traits > */ template ::result_type> -class RangeFactorWithTransform : public ExpressionFactor2 { +class RangeFactorWithTransform : public ExpressionFactorN { private: typedef RangeFactorWithTransform This; - typedef ExpressionFactor2 Base; + typedef ExpressionFactorN Base; A1 body_T_sensor_; ///< The pose of the sensor in the body frame @@ -100,8 +114,8 @@ class RangeFactorWithTransform : public ExpressionFactor2 { RangeFactorWithTransform(Key key1, Key key2, T measured, const SharedNoiseModel& model, const A1& body_T_sensor) - : Base(key1, key2, model, measured), body_T_sensor_(body_T_sensor) { - this->initialize(expression(key1, key2)); + : Base({key1, key2}, model, measured), body_T_sensor_(body_T_sensor) { + this->initialize(expression({key1, key2})); } virtual ~RangeFactorWithTransform() {} @@ -113,15 +127,29 @@ class RangeFactorWithTransform : public ExpressionFactor2 { } // Return measurement expression - Expression expression(Key key1, Key key2) const override { + Expression expression(const typename Base::ArrayNKeys& keys) const override { Expression body_T_sensor__(body_T_sensor_); - Expression nav_T_body_(key1); + Expression nav_T_body_(keys[0]); Expression nav_T_sensor_(traits::Compose, nav_T_body_, body_T_sensor__); - Expression a2_(key2); + Expression a2_(keys[1]); return Expression(Range(), nav_T_sensor_, a2_); } + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = Base::unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if (H2) *H2 = Hs[1]; + return error; + } + /** print contents */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { @@ -135,9 +163,12 @@ class RangeFactorWithTransform : 
public ExpressionFactor2 { friend class boost::serialization::access; template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + // **IMPORTANT** We need to (de)serialize parameters before the base class, + // since it calls expression() and we need all parameters ready at that + // point. + ar& BOOST_SERIALIZATION_NVP(body_T_sensor_); ar& boost::serialization::make_nvp( "Base", boost::serialization::base_object(*this)); - ar& BOOST_SERIALIZATION_NVP(body_T_sensor_); } }; // \ RangeFactorWithTransform diff --git a/gtsam/sam/tests/testBearingFactor.cpp b/gtsam/sam/tests/testBearingFactor.cpp index 12635a7e5..17a049a1d 100644 --- a/gtsam/sam/tests/testBearingFactor.cpp +++ b/gtsam/sam/tests/testBearingFactor.cpp @@ -68,7 +68,7 @@ TEST(BearingFactor, 2D) { values.insert(poseKey, Pose2(1.0, 2.0, 0.57)); values.insert(pointKey, Point2(-4.0, 11.0)); - EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), + EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), values, 1e-7, 1e-5); EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } @@ -104,7 +104,7 @@ TEST(BearingFactor, Serialization3D) { // values.insert(poseKey, Pose3()); // values.insert(pointKey, Point3(1, 0, 0)); // -// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), +// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), // values, 1e-7, 1e-5); // EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); //} diff --git a/gtsam/sam/tests/testBearingRangeFactor.cpp b/gtsam/sam/tests/testBearingRangeFactor.cpp index 4c7a9ab91..735358d89 100644 --- a/gtsam/sam/tests/testBearingRangeFactor.cpp +++ b/gtsam/sam/tests/testBearingRangeFactor.cpp @@ -67,7 +67,7 @@ TEST(BearingRangeFactor, 2D) { values.insert(poseKey, Pose2(1.0, 2.0, 0.57)); values.insert(pointKey, Point2(-4.0, 11.0)); - EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), + EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), values, 1e-7, 1e-5); EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } @@ -95,7 +95,7 @@ TEST(BearingRangeFactor, Serialization3D) { // values.insert(poseKey, Pose3()); // values.insert(pointKey, Point3(1, 0, 0)); // -// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), +// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), // values, 1e-7, 1e-5); // EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); //} diff --git a/gtsam/sam/tests/testRangeFactor.cpp b/gtsam/sam/tests/testRangeFactor.cpp index 54d4a43c0..8ae3d818b 100644 --- a/gtsam/sam/tests/testRangeFactor.cpp +++ b/gtsam/sam/tests/testRangeFactor.cpp @@ -141,6 +141,27 @@ TEST( RangeFactor, EqualsWithTransform ) { body_P_sensor_3D); CHECK(assert_equal(factor3D_1, factor3D_2)); } +/* ************************************************************************* */ +TEST( RangeFactor, EqualsAfterDeserializing) { + // Check that the same results are obtained after deserializing: + Pose3 body_P_sensor_3D(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2), + Point3(0.25, -0.10, 1.0)); + + RangeFactorWithTransform3D factor3D_1(poseKey, pointKey, measurement, model, + body_P_sensor_3D), factor3D_2; + + // check with Equal() trait: + gtsam::serializationTestHelpers::roundtripXML(factor3D_1, factor3D_2); + CHECK(assert_equal(factor3D_1, factor3D_2)); + + const Pose3 pose(Rot3::RzRyRx(0.2, -0.3, 1.75), Point3(1.0, 2.0, -3.0)); + const Point3 point(-2.0, 11.0, 1.0); + const Values values = {{poseKey, 
genericValue(pose)}, {pointKey, genericValue(point)}}; + + const Vector error_1 = factor3D_1.unwhitenedError(values); + const Vector error_2 = factor3D_2.unwhitenedError(values); + CHECK(assert_equal(error_1, error_2)); +} /* ************************************************************************* */ TEST( RangeFactor, Error2D ) { @@ -152,7 +173,7 @@ TEST( RangeFactor, Error2D ) { Point2 point(-4.0, 11.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(5.0, 9.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -175,7 +196,7 @@ TEST( RangeFactor, Error2DWithTransform ) { Point2 point(-4.0, 11.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(5.0, 9.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -194,7 +215,7 @@ TEST( RangeFactor, Error3D ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(3.0, 9.0, 4.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -218,7 +239,7 @@ TEST( RangeFactor, Error3DWithTransform ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(3.0, 9.0, 4.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -266,8 +287,10 @@ TEST( RangeFactor, Jacobian2DWithTransform ) { Point2 point(-4.0, 11.0); // Use the factor to calculate the Jacobians - Matrix H1Actual, H2Actual; - factor.evaluateError(pose, point, H1Actual, H2Actual); + std::vector actualHs(2); + factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}, actualHs); + const Matrix& H1Actual = actualHs.at(0); + const Matrix& H2Actual = actualHs.at(1); // Use numerical derivatives to calculate the Jacobians Matrix H1Expected, H2Expected; @@ -291,8 +314,10 @@ TEST( RangeFactor, Jacobian3D ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the Jacobians - Matrix H1Actual, H2Actual; - factor.evaluateError(pose, point, H1Actual, H2Actual); + std::vector actualHs(2); + factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}, actualHs); + const Matrix& H1Actual = actualHs.at(0); + const Matrix& H2Actual = actualHs.at(1); // Use numerical derivatives to calculate the Jacobians Matrix H1Expected, H2Expected; @@ -321,8 +346,10 @@ TEST( RangeFactor, Jacobian3DWithTransform ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the Jacobians - Matrix H1Actual, H2Actual; - factor.evaluateError(pose, point, H1Actual, H2Actual); + std::vector actualHs(2); + factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}, actualHs); + const Matrix& H1Actual = 
actualHs.at(0); + const Matrix& H2Actual = actualHs.at(1); // Use numerical derivatives to calculate the Jacobians Matrix H1Expected, H2Expected; @@ -350,7 +377,7 @@ TEST(RangeFactor, Point3) { Vector expectedError = (Vector(1) << 0.295630141).finished(); // Verify we get the expected error - CHECK(assert_equal(expectedError, factor.evaluateError(pose, point), 1e-9)); + CHECK(assert_equal(expectedError, factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}), 1e-9)); } /* ************************************************************************* */ @@ -393,7 +420,7 @@ TEST(RangeFactor, NonGTSAM) { Vector expectedError = (Vector(1) << 0.295630141).finished(); // Verify we get the expected error - CHECK(assert_equal(expectedError, factor.evaluateError(pose, point), 1e-9)); + CHECK(assert_equal(expectedError, factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}), 1e-9)); } /* ************************************************************************* */ diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h new file mode 100644 index 000000000..c525c1b9e --- /dev/null +++ b/gtsam/sfm/BinaryMeasurement.h @@ -0,0 +1,83 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +#pragma once + +/** + * @file BinaryMeasurement.h + * @author Akshay Krishnan + * @date July 2020 + * @brief Binary measurement represents a measurement between two keys in a + * graph. A binary measurement is similar to a BetweenFactor, except that it + * does not contain an error function. It is a measurement (along with a noise + * model) from one key to another. Note that the direction is important. A + * measurement from key1 to key2 is not the same as the same measurement from + * key2 to key1. 
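 * A typical instantiation, sketched here with made-up keys, rotation, and sigma:
 * \code
 * BinaryMeasurement<Rot3> m01(0, 1, Rot3::RzRyRx(0.1, 0.2, 0.3),
 *                             noiseModel::Isotropic::Sigma(3, 0.05));
 * \endcode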
+ */ + +#include +#include +#include +#include + +#include +#include + +namespace gtsam { + +template class BinaryMeasurement : public Factor { + // Check that T type is testable + BOOST_CONCEPT_ASSERT((IsTestable)); + +public: + // shorthand for a smart pointer to a measurement + using shared_ptr = typename boost::shared_ptr; + +private: + T measured_; ///< The measurement + SharedNoiseModel noiseModel_; ///< Noise model + +public: + BinaryMeasurement(Key key1, Key key2, const T &measured, + const SharedNoiseModel &model = nullptr) + : Factor(std::vector({key1, key2})), measured_(measured), + noiseModel_(model) {} + + /// @name Standard Interface + /// @{ + + Key key1() const { return keys_[0]; } + Key key2() const { return keys_[1]; } + const T &measured() const { return measured_; } + const SharedNoiseModel &noiseModel() const { return noiseModel_; } + + /// @} + /// @name Testable + /// @{ + + void print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const { + std::cout << s << "BinaryMeasurement(" << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n"; + traits::Print(measured_, " measured: "); + this->noiseModel_->print(" noise model: "); + } + + bool equals(const BinaryMeasurement &expected, double tol = 1e-9) const { + const BinaryMeasurement *e = + dynamic_cast *>(&expected); + return e != nullptr && Factor::equals(*e) && + traits::Equals(this->measured_, e->measured_, tol) && + noiseModel_->equals(*expected.noiseModel()); + } + /// @} +}; +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp new file mode 100644 index 000000000..d4f189b0b --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.cpp @@ -0,0 +1,841 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.cpp + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +// In Wrappers we have no access to this so have a default ready +static std::mt19937 kRandomNumberGenerator(42); + +using Sparse = Eigen::SparseMatrix; + +/* ************************************************************************* */ +template +ShonanAveragingParameters::ShonanAveragingParameters( + const LevenbergMarquardtParams &_lm, const std::string &method, + double optimalityThreshold, double alpha, double beta, double gamma) + : lm(_lm), optimalityThreshold(optimalityThreshold), alpha(alpha), + beta(beta), gamma(gamma) { + // By default, we will do conjugate gradient + lm.linearSolverType = LevenbergMarquardtParams::Iterative; + + // Create subgraph builder parameters + SubgraphBuilderParameters builderParameters; + builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL; + builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL; + builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON; + builderParameters.augmentationFactor = 0.0; + + auto pcg = boost::make_shared(); + + // Choose optimization method + if (method == "SUBGRAPH") { + lm.iterativeParams = + boost::make_shared(builderParameters); + } else if (method == "SGPC") { + pcg->preconditioner_ = + boost::make_shared(builderParameters); + lm.iterativeParams = pcg; + } else if (method == "JACOBI") { + pcg->preconditioner_ = + boost::make_shared(); + lm.iterativeParams = pcg; + } else if (method == "QR") { + lm.setLinearSolverType("MULTIFRONTAL_QR"); + } else if (method == "CHOLESKY") { + lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY"); + } else { + throw std::invalid_argument("ShonanAveragingParameters: unknown method"); + } +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template struct ShonanAveragingParameters<2>; +template struct ShonanAveragingParameters<3>; + +/* ************************************************************************* */ +// Calculate number of unknown rotations referenced by measurements +template +static size_t +NrUnknowns(const typename ShonanAveraging::Measurements &measurements) { + std::set keys; + for (const auto &measurement : measurements) { + keys.insert(measurement.key1()); + keys.insert(measurement.key2()); + } + return keys.size(); +} + +/* ************************************************************************* */ +template +ShonanAveraging::ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters) + : parameters_(parameters), measurements_(measurements), + nrUnknowns_(NrUnknowns(measurements)) { + for (const auto &measurement : measurements_) { + const auto &model = measurement.noiseModel(); + if (model && model->dim() != SO::dimension) { + measurement.print("Factor with incorrect noise model:\n"); + throw std::invalid_argument("ShonanAveraging: measurements passed to " + "constructor have incorrect dimension."); + } + } + Q_ = buildQ(); + D_ = buildD(); + L_ = D_ - Q_; +} + +/* 
************************************************************************* */ +template +NonlinearFactorGraph ShonanAveraging::buildGraphAt(size_t p) const { + NonlinearFactorGraph graph; + auto G = boost::make_shared(SO<-1>::VectorizedGenerators(p)); + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>(keys[0], keys[1], Rij, p, model, G); + } + + // Possibly add Karcher prior + if (parameters_.beta > 0) { + const size_t dim = SOn::Dimension(p); + graph.emplace_shared>(graph.keys(), dim); + } + + // Possibly add gauge factors - they are probably useless as gradient is zero + if (parameters_.gamma > 0 && p > d + 1) { + for (auto key : graph.keys()) + graph.emplace_shared(key, p, d, parameters_.gamma); + } + + return graph; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::costAt(size_t p, const Values &values) const { + const NonlinearFactorGraph graph = buildGraphAt(p); + return graph.error(values); +} + +/* ************************************************************************* */ +template +boost::shared_ptr +ShonanAveraging::createOptimizerAt(size_t p, const Values &initial) const { + // Build graph + NonlinearFactorGraph graph = buildGraphAt(p); + + // Anchor prior is added here as depends on initial value (and cost is zero) + if (parameters_.alpha > 0) { + size_t i; + Rot value; + const size_t dim = SOn::Dimension(p); + std::tie(i, value) = parameters_.anchor; + auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha); + graph.emplace_shared>(i, SOn::Lift(p, value.matrix()), + model); + } + + // Optimize + return boost::make_shared(graph, initial, + parameters_.lm); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::tryOptimizingAt(size_t p, + const Values &initial) const { + auto lm = createOptimizerAt(p, initial); + return lm->optimize(); +} + +/* ************************************************************************* */ +// Project to pxdN Stiefel manifold +template +Matrix ShonanAveraging::StiefelElementMatrix(const Values &values) { + const size_t N = values.size(); + const size_t p = values.at(0).rows(); + Matrix S(p, N * d); + for (const auto it : values.filter()) { + S.middleCols(it.key * d) = + it.value.matrix().leftCols(); // project Qj to Stiefel + } + return S; +} + +/* ************************************************************************* */ +template <> +Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0)); + result.insert(it.key, R); + } + return result; +} + +template <> +Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>()); + result.insert(it.key, R); + } + return result; +} + +/* ************************************************************************* */ +template static Matrix RoundSolutionS(const Matrix &S) { + const size_t N = S.cols() / d; + // First, compute a thin SVD of S + Eigen::JacobiSVD svd(S, Eigen::ComputeThinV); + const Vector sigmas = 
svd.singularValues(); + + // Construct a diagonal matrix comprised of the first d singular values + using DiagonalMatrix = Eigen::DiagonalMatrix; + DiagonalMatrix Sigma_d; + Sigma_d.diagonal() = sigmas.head(); + + // Now, construct a rank-d truncated singular value decomposition for S + Matrix R = Sigma_d * svd.matrixV().leftCols().transpose(); + + // Count the number of blocks whose determinants have positive sign + size_t numPositiveBlocks = 0; + for (size_t i = 0; i < N; ++i) { + // Compute the determinant of the ith dxd block of R + double determinant = R.middleCols(d * i).determinant(); + if (determinant > 0) + ++numPositiveBlocks; + } + + if (numPositiveBlocks < N / 2) { + // Less than half of the total number of blocks have the correct sign. + // To reverse their orientations, multiply with a reflection matrix. + DiagonalMatrix reflector; + reflector.setIdentity(); + reflector.diagonal()(d - 1) = -1; + R = reflector * R; + } + + return R; +} + +/* ************************************************************************* */ +template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const { + // Round to a 2*2N matrix + Matrix R = RoundSolutionS<2>(S); + + // Finally, project each dxd rotation block to SO(2) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j)); + values.insert(j, Ri); + } + return values; +} + +template <> Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const { + // Round to a 3*3N matrix + Matrix R = RoundSolutionS<3>(S); + + // Finally, project each dxd rotation block to SO(3) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j)); + values.insert(j, Ri); + } + return values; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::roundSolution(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. + return roundSolutionS(S); +} + +/* ************************************************************************* */ +template +double ShonanAveraging::cost(const Values &values) const { + NonlinearFactorGraph graph; + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>>( + keys[0], keys[1], SO(Rij.matrix()), model); + } + // Finally, project each dxd rotation block to SO(d) + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SO(it.value.matrix())); + } + return graph.error(result); +} + +/* ************************************************************************* */ +// Get kappa from noise model +template +static double Kappa(const BinaryMeasurement &measurement) { + const auto &isotropic = boost::dynamic_pointer_cast( + measurement.noiseModel()); + if (!isotropic) { + throw std::invalid_argument( + "Shonan averaging noise models must be isotropic."); + } + const double sigma = isotropic->sigma(); + return 1.0 / (sigma * sigma); +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildD() const { + // Each measurement contributes 2*d elements along the diagonal of the + // degree matrix. 
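  // Equivalently: with kappa_ij = 1/sigma_ij^2 for edge (i, j), each d x d
  // diagonal block accumulates D_ii = (sum of kappa_ij over edges incident to i) * I_d,
  // i.e. the weighted degree term used in the constructor to form L_ = D_ - Q_.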
+ static constexpr size_t stride = 2 * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t k = 0; k < d; k++) { + // Elements of ith block-diagonal + triplets.emplace_back(di + k, di + k, kappa); + // Elements of jth block-diagonal + triplets.emplace_back(dj + k, dj + k, kappa); + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse D(dN, dN); + D.setFromTriplets(triplets.begin(), triplets.end()); + + return D; +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildQ() const { + // Each measurement contributes 2*d^2 elements on a pair of symmetric + // off-diagonal blocks + static constexpr size_t stride = 2 * d * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Extract rotation measurement + const auto Rij = measurement.measured().matrix(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t r = 0; r < d; r++) { + for (size_t c = 0; c < d; c++) { + // Elements of ij block + triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c)); + // Elements of ji block + triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r)); + } + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse Q(dN, dN); + Q.setFromTriplets(triplets.begin(), triplets.end()); + + return Q; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Matrix &S) const { + // Each pose contributes 2*d elements along the diagonal of Lambda + static constexpr size_t stride = d * d; + + // Reserve space for triplets + const size_t N = nrUnknowns(); + std::vector> triplets; + triplets.reserve(stride * N); + + // Do sparse-dense multiply to get Q*S' + auto QSt = Q_ * S.transpose(); + + for (size_t j = 0; j < N; j++) { + // Compute B, the building block for the j^th diagonal block of Lambda + const size_t dj = d * j; + Matrix B = QSt.middleRows(dj, d) * S.middleCols(dj); + + // Elements of jth block-diagonal + for (size_t r = 0; r < d; r++) + for (size_t c = 0; c < d; c++) + triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r))); + } + + // Construct and return a sparse matrix from these triplets + Sparse Lambda(d * N, d * N); + Lambda.setFromTriplets(triplets.begin(), triplets.end()); + return Lambda; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. 
+ return computeLambda(S); +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeA(const Values &values) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +/// MINIMUM EIGENVALUE COMPUTATIONS + +/** This is a lightweight struct used in conjunction with Spectra to compute + * the minimum eigenvalue and eigenvector of a sparse matrix A; it has a single + * nontrivial function, perform_op(x,y), that computes and returns the product + * y = (A + sigma*I) x */ +struct MatrixProdFunctor { + // Const reference to an externally-held matrix whose minimum-eigenvalue we + // want to compute + const Sparse &A_; + + // Spectral shift + double sigma_; + + // Constructor + explicit MatrixProdFunctor(const Sparse &A, double sigma = 0) + : A_(A), sigma_(sigma) {} + + int rows() const { return A_.rows(); } + int cols() const { return A_.cols(); } + + // Matrix-vector multiplication operation + void perform_op(const double *x, double *y) const { + // Wrap the raw arrays as Eigen Vector types + Eigen::Map X(x, rows()); + Eigen::Map Y(y, rows()); + + // Do the multiplication using wrapped Eigen vectors + Y = A_ * X + sigma_ * X; + } +}; + +/// Function to compute the minimum eigenvalue of A using Lanczos in Spectra. +/// This does 2 things: +/// +/// (1) Quick (coarse) eigenvalue computation to estimate the largest-magnitude +/// eigenvalue (2) A second eigenvalue computation applied to A-sigma*I, where +/// sigma is chosen to make the minimum eigenvalue of A the extremal eigenvalue +/// of A-sigma*I +/// +/// Upon completion, this returns a boolean value indicating whether the minimum +/// eigenvalue was computed to the required precision -- if so, its sets the +/// values of minEigenValue and minEigenVector appropriately + +/// Note that in the following function signature, S is supposed to be the +/// block-row-matrix that is a critical point for the optimization algorithm; +/// either S (Stiefel manifold) or R (block rotations). We use this to +/// construct a starting vector v for the Lanczos process that will be close to +/// the minimum eigenvector we're looking for whenever the relaxation is exact +/// -- this is a key feature that helps to make this method fast. Note that +/// instead of passing in all of S, it would be enough to pass in one of S's +/// *rows*, if that's more convenient. + +// For the defaults, David Rosen says: +// - maxIterations refers to the max number of Lanczos iterations to run; +// ~1000 should be sufficiently large +// - We've been using 10^-4 for the nonnegativity tolerance +// - for numLanczosVectors, 20 is a good default value + +static bool +SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue, + Vector *minEigenVector = 0, size_t *numIterations = 0, + size_t maxIterations = 1000, + double minEigenvalueNonnegativityTolerance = 10e-4, + Eigen::Index numLanczosVectors = 20) { + // a. 
Estimate the largest-magnitude eigenvalue of this matrix using Lanczos + MatrixProdFunctor lmOperator(A); + Spectra::SymEigsSolver + lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows())); + lmEigenValueSolver.init(); + + const int lmConverged = lmEigenValueSolver.compute( + maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + // Check convergence and bail out if necessary + if (lmConverged != 1) + return false; + + const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0); + + if (lmEigenValue < 0) { + // The largest-magnitude eigenvalue is negative, and therefore also the + // minimum eigenvalue, so just return this solution + *minEigenValue = lmEigenValue; + if (minEigenVector) { + *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + return true; + } + + // The largest-magnitude eigenvalue is positive, and is therefore the + // maximum eigenvalue. Therefore, after shifting the spectrum of A + // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted + // spectrum will lie in the interval [minEigenValue(A) - 2* lambda_max(A), + // -lambda_max*A]; in particular, the largest-magnitude eigenvalue of + // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding + // eigenvector v_min + + MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue); + + Spectra::SymEigsSolver + minEigenValueSolver(&minShiftedOperator, 1, + std::min(numLanczosVectors, A.rows())); + + // If S is a critical point of F, then S^T is also in the null space of S - + // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are + // eigenvectors corresponding to the eigenvalue 0. In the case that the + // relaxation is exact, this is the *minimum* eigenvalue, and therefore the + // rows of S are exactly the eigenvectors that we're looking for. On the + // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at + // least one strictly negative eigenvalue, and the rows of S are *unstable + // fixed points* for the Lanczos iterations. Thus, we will take a slightly + // "fuzzed" version of the first row of S as an initialization for the + // Lanczos iterations; this allows for rapid convergence in the case that + // the relaxation is exact (since are starting close to a solution), while + // simultaneously allowing the iterations to escape from this fixed point in + // the case that the relaxation is not exact. 
+ Vector v0 = S.row(0).transpose(); + Vector perturbation(v0.size()); + perturbation.setRandom(); + perturbation.normalize(); + Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3% + + // Use this to initialize the eigensolver + minEigenValueSolver.init(xinit.data()); + + // Now determine the relative precision required in the Lanczos method in + // order to be able to estimate the smallest eigenvalue within an *absolute* + // tolerance of 'minEigenvalueNonnegativityTolerance' + const int minConverged = minEigenValueSolver.compute( + maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue, + Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + if (minConverged != 1) + return false; + + *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue; + if (minEigenVector) { + *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + if (numIterations) + *numIterations = minEigenValueSolver.num_iterations(); + return true; +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::computeA(const Matrix &S) const { + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::computeMinEigenValue(const Values &values, + Vector *minEigenVector) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto A = computeA(S); + + double minEigenValue; + bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector); + if (!success) { + throw std::runtime_error( + "SparseMinimumEigenValue failed to compute minimum eigenvalue."); + } + return minEigenValue; +} + +/* ************************************************************************* */ +template +std::pair +ShonanAveraging::computeMinEigenVector(const Values &values) const { + Vector minEigenVector; + double minEigenValue = computeMinEigenValue(values, &minEigenVector); + return std::make_pair(minEigenValue, minEigenVector); +} + +/* ************************************************************************* */ +template +bool ShonanAveraging::checkOptimality(const Values &values) const { + double minEigenValue = computeMinEigenValue(values); + return minEigenValue > parameters_.optimalityThreshold; +} + +/* ************************************************************************* */ +/// Create a tangent direction xi with eigenvector segment v_i +template +Vector ShonanAveraging::MakeATangentVector(size_t p, const Vector &v, + size_t i) { + // Create a tangent direction xi with eigenvector segment v_i + const size_t dimension = SOn::Dimension(p); + const auto v_i = v.segment(d * i); + Vector xi = Vector::Zero(dimension); + double sign = pow(-1.0, round((p + 1) / 2) + 1); + for (size_t j = 0; j < d; j++) { + xi(j + p - d - 1) = sign * v_i(d - j - 1); + sign = -sign; + } + return xi; +} + +/* ************************************************************************* */ +template +Matrix ShonanAveraging::riemannianGradient(size_t p, + const Values &values) const { + Matrix S_dot = StiefelElementMatrix(values); + // calculate the gradient of F(Q_dot) at Q_dot + Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose(); + // cout << "euclidean gradient rows and cols" << euclideanGradient.rows() << + // "\t" << euclideanGradient.cols() << endl; + + // project the gradient onto the entire euclidean 
space + Matrix symBlockDiagProduct(p, d * nrUnknowns()); + for (size_t i = 0; i < nrUnknowns(); i++) { + // Compute block product + const size_t di = d * i; + const Matrix P = S_dot.middleCols(di).transpose() * + euclideanGradient.middleCols(di); + // Symmetrize this block + Matrix S = .5 * (P + P.transpose()); + // Compute S_dot * S and set corresponding block + symBlockDiagProduct.middleCols(di) = S_dot.middleCols(di) * S; + } + Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct; + return riemannianGradient; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector) { + Values lifted = LiftTo(p, values); + for (auto it : lifted.filter()) { + // Create a tangent direction xi with eigenvector segment v_i + // Assumes key is 0-based integer + const Vector xi = MakeATangentVector(p, minEigenVector, it.key); + // Move the old value in the descent direction + it.value = it.value.retract(xi); + } + return lifted; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeWithDescent( + size_t p, const Values &values, const Vector &minEigenVector, + double minEigenValue, double gradienTolerance, + double preconditionedGradNormTolerance) const { + double funcVal = costAt(p - 1, values); + double alphaMin = 1e-2; + double alpha = + std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue)); + vector alphas; + vector fvals; + // line search + while ((alpha >= alphaMin)) { + Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector); + double funcValTest = costAt(p, Qplus); + Matrix gradTest = riemannianGradient(p, Qplus); + double gradTestNorm = gradTest.norm(); + // Record alpha and funcVal + alphas.push_back(alpha); + fvals.push_back(funcValTest); + if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) { + return Qplus; + } + alpha /= 2; + } + + auto fminIter = min_element(fvals.begin(), fvals.end()); + auto minIdx = distance(fvals.begin(), fminIter); + double fMin = fvals[minIdx]; + double aMin = alphas[minIdx]; + if (fMin < funcVal) { + Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector); + return Qplus; + } + + return LiftwithDescent(p, values, alpha * minEigenVector); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomly(std::mt19937 &rng) const { + Values initial; + for (size_t j = 0; j < nrUnknowns(); j++) { + initial.insert(j, Rot::Random(rng)); + } + return initial; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomly() const { + return ShonanAveraging::initializeRandomly(kRandomNumberGenerator); +} + +/* ************************************************************************* */ +template +std::pair ShonanAveraging::run(const Values &initialEstimate, + size_t pMin, + size_t pMax) const { + Values Qstar; + Values initialSOp = LiftTo(pMin, initialEstimate); // lift to pMin! 
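+  // Riemannian staircase (descriptive summary of the loop below): optimize at
+  // level p over SO(p)^N, then check the global-optimality certificate via the
+  // minimum eigenvalue of A = Lambda(S) - Q; if the certificate fails and
+  // p < pMax, use the corresponding eigenvector as a descent direction to
+  // initialize level p+1.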
+ for (size_t p = pMin; p <= pMax; p++) {
+ // Optimize until convergence at this level
+ Qstar = tryOptimizingAt(p, initialSOp);
+
+ // Check certificate of global optimality
+ Vector minEigenVector;
+ double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector);
+ if (minEigenValue > parameters_.optimalityThreshold) {
+ // If at global optimum, round and return solution
+ const Values SO3Values = roundSolution(Qstar);
+ return std::make_pair(SO3Values, minEigenValue);
+ }
+
+ // Not at global optimum yet, so check whether we will go to next level
+ if (p != pMax) {
+ // Calculate initial estimate for next level by following minEigenVector
+ initialSOp =
+ initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue);
+ }
+ }
+ throw std::runtime_error("Shonan::run did not converge for given pMax");
+}
+
+/* ************************************************************************* */
+// Explicit instantiation for d=2
+template class ShonanAveraging<2>;
+
+ShonanAveraging2::ShonanAveraging2(const Measurements &measurements,
+ const Parameters &parameters)
+ : ShonanAveraging<2>(measurements, parameters) {}
+ShonanAveraging2::ShonanAveraging2(string g2oFile, const Parameters &parameters)
+ : ShonanAveraging<2>(parseMeasurements(g2oFile), parameters) {}
+
+/* ************************************************************************* */
+// Explicit instantiation for d=3
+template class ShonanAveraging<3>;
+
+ShonanAveraging3::ShonanAveraging3(const Measurements &measurements,
+ const Parameters &parameters)
+ : ShonanAveraging<3>(measurements, parameters) {}
+
+ShonanAveraging3::ShonanAveraging3(string g2oFile, const Parameters &parameters)
+ : ShonanAveraging<3>(parseMeasurements(g2oFile), parameters) {}
+
+// TODO(frank): Deprecate after we land pybind wrapper
+
+// Extract Rot3 measurement from Pose3 betweenfactors
+// Modeled after similar function in dataset.cpp
+static BinaryMeasurement
+convert(const BetweenFactor::shared_ptr &f) {
+ auto gaussian =
+ boost::dynamic_pointer_cast(f->noiseModel());
+ if (!gaussian)
+ throw std::invalid_argument(
+ "parseMeasurements can only convert Pose3 measurements "
+ "with Gaussian noise models.");
+ const Matrix6 M = gaussian->covariance();
+ return BinaryMeasurement(
+ f->key1(), f->key2(), f->measured().rotation(),
+ noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true));
+}
+
+static ShonanAveraging3::Measurements
+extractRot3Measurements(const BetweenFactorPose3s &factors) {
+ ShonanAveraging3::Measurements result;
+ result.reserve(factors.size());
+ for (auto f : factors)
+ result.push_back(convert(f));
+ return result;
+}
+
+ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors,
+ const Parameters &parameters)
+ : ShonanAveraging<3>(extractRot3Measurements(factors), parameters) {}
+
+/* ************************************************************************* */
+} // namespace gtsam
diff --git a/gtsam/sfm/ShonanAveraging.h b/gtsam/sfm/ShonanAveraging.h
new file mode 100644
index 000000000..90c0bda63
--- /dev/null
+++ b/gtsam/sfm/ShonanAveraging.h
@@ -0,0 +1,356 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al.
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.h + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace gtsam { +class NonlinearFactorGraph; +class LevenbergMarquardtOptimizer; + +/// Parameters governing optimization etc. +template struct GTSAM_EXPORT ShonanAveragingParameters { + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + using Anchor = std::pair; + + // Paremeters themselves: + LevenbergMarquardtParams lm; // LM parameters + double optimalityThreshold; // threshold used in checkOptimality + Anchor anchor; // pose to use as anchor if not Karcher + double alpha; // weight of anchor-based prior (default 0) + double beta; // weight of Karcher-based prior (default 1) + double gamma; // weight of gauge-fixing factors (default 0) + + ShonanAveragingParameters(const LevenbergMarquardtParams &lm = + LevenbergMarquardtParams::CeresDefaults(), + const std::string &method = "JACOBI", + double optimalityThreshold = -1e-4, + double alpha = 0.0, double beta = 1.0, + double gamma = 0.0); + + LevenbergMarquardtParams getLMParams() const { return lm; } + + void setOptimalityThreshold(double value) { optimalityThreshold = value; } + double getOptimalityThreshold() const { return optimalityThreshold; } + + void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; } + + void setAnchorWeight(double value) { alpha = value; } + double getAnchorWeight() { return alpha; } + + void setKarcherWeight(double value) { beta = value; } + double getKarcherWeight() { return beta; } + + void setGaugesWeight(double value) { gamma = value; } + double getGaugesWeight() { return gamma; } +}; + +using ShonanAveragingParameters2 = ShonanAveragingParameters<2>; +using ShonanAveragingParameters3 = ShonanAveragingParameters<3>; + +/** + * Class that implements Shonan Averaging from our ECCV'20 paper. + * Note: The "basic" API uses all Rot values (Rot2 or Rot3, depending on value + * of d), whereas the different levels and "advanced" API at SO(p) needs Values + * of type SOn. + * + * The template parameter d can be 2 or 3. + * Both are specialized in the .cpp file. + * + * If you use this code in your work, please consider citing our paper: + * Shonan Rotation Averaging, Global Optimality by Surfing SO(p)^n + * Frank Dellaert, David M. Rosen, Jing Wu, Robert Mahony, and Luca Carlone, + * European Computer Vision Conference, 2020. + * You can view our ECCV spotlight video at https://youtu.be/5ppaqMyHtE0 + */ +template class GTSAM_EXPORT ShonanAveraging { +public: + using Sparse = Eigen::SparseMatrix; + + // Define the Parameters type and use its typedef of the rotation type: + using Parameters = ShonanAveragingParameters; + using Rot = typename Parameters::Rot; + + // We store SO(d) BetweenFactors to get noise model + // TODO(frank): use BinaryMeasurement? 
+ using Measurements = std::vector>; + +private: + Parameters parameters_; + Measurements measurements_; + size_t nrUnknowns_; + Sparse D_; // Sparse (diagonal) degree matrix + Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr + Sparse L_; // connection Laplacian L = D - Q, needed for optimality check + + /** + * Build 3Nx3N sparse matrix consisting of rotation measurements, arranged as + * (i,j) and (j,i) blocks within a sparse matrix. + */ + Sparse buildQ() const; + + /// Build 3Nx3N sparse degree matrix D + Sparse buildD() const; + +public: + /// @name Standard Constructors + /// @{ + + /// Construct from set of relative measurements (given as BetweenFactor + /// for now) NoiseModel *must* be isotropic. + ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + + /// @} + /// @name Query properties + /// @{ + + /// Return number of unknowns + size_t nrUnknowns() const { return nrUnknowns_; } + + /// Return number of measurements + size_t nrMeasurements() const { return measurements_.size(); } + + /// k^th binary measurement + const BinaryMeasurement &measurement(size_t k) const { + return measurements_[k]; + } + + /// k^th measurement, as a Rot. + const Rot &measured(size_t k) const { return measurements_[k].measured(); } + + /// Keys for k^th measurement, as a vector of Key values. + const KeyVector &keys(size_t k) const { return measurements_[k].keys(); } + + /// @} + /// @name Matrix API (advanced use, debugging) + /// @{ + + Sparse D() const { return D_; } ///< Sparse version of D + Matrix denseD() const { return Matrix(D_); } ///< Dense version of D + Sparse Q() const { return Q_; } ///< Sparse version of Q + Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q + Sparse L() const { return L_; } ///< Sparse version of L + Matrix denseL() const { return Matrix(L_); } ///< Dense version of L + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeLambda(const Matrix &S) const; + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Values &values) const { + return Matrix(computeLambda(values)); + } + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Matrix &S) const { + return Matrix(computeLambda(S)); + } + + /// Compute A matrix whose Eigenvalues we will examine + Sparse computeA(const Values &values) const; + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeA(const Matrix &S) const; + + /// Dense version of computeA for wrapper/testing + Matrix computeA_(const Values &values) const { + return Matrix(computeA(values)); + } + + /// Project to pxdN Stiefel manifold + static Matrix StiefelElementMatrix(const Values &values); + + /** + * Compute minimum eigenvalue for optimality check. 
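+   * The solution is certified globally optimal when this value exceeds the
+   * (by default slightly negative) optimalityThreshold in the parameters;
+   * see checkOptimality.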
+ * @param values: should be of type SOn + */ + double computeMinEigenValue(const Values &values, + Vector *minEigenVector = nullptr) const; + + /// Project pxdN Stiefel manifold matrix S to Rot3^N + Values roundSolutionS(const Matrix &S) const; + + /// Create a tangent direction xi with eigenvector segment v_i + static Vector MakeATangentVector(size_t p, const Vector &v, size_t i); + + /// Calculate the riemannian gradient of F(values) at values + Matrix riemannianGradient(size_t p, const Values &values) const; + + /** + * Lift up the dimension of values in type SO(p-1) with descent direction + * provided by minEigenVector and return new values in type SO(p) + */ + static Values LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector); + + /** + * Given some values at p-1, return new values at p, by doing a line search + * along the descent direction, computed from the minimum eigenvector at p-1. + * @param values should be of type SO(p-1) + * @param minEigenVector corresponding to minEigenValue at level p-1 + * @return values of type SO(p) + */ + Values + initializeWithDescent(size_t p, const Values &values, + const Vector &minEigenVector, double minEigenValue, + double gradienTolerance = 1e-2, + double preconditionedGradNormTolerance = 1e-4) const; + /// @} + /// @name Advanced API + /// @{ + + /** + * Build graph for SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + */ + NonlinearFactorGraph buildGraphAt(size_t p) const; + + /** + * Calculate cost for SO(p) + * Values should be of type SO(p) + */ + double costAt(size_t p, const Values &values) const; + + /** + * Given an estimated local minimum Yopt for the (possibly lifted) + * relaxation, this function computes and returns the block-diagonal elements + * of the corresponding Lagrange multiplier. + */ + Sparse computeLambda(const Values &values) const; + + /** + * Compute minimum eigenvalue for optimality check. 
+ * @param values: should be of type SOn + * @return minEigenVector and minEigenValue + */ + std::pair computeMinEigenVector(const Values &values) const; + + /** + * Check optimality + * @param values: should be of type SOn + */ + bool checkOptimality(const Values &values) const; + + /** + * Try to create optimizer at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return lm optimizer + */ + boost::shared_ptr createOptimizerAt( + size_t p, const Values &initial) const; + + /** + * Try to optimize at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return SO(p) values + */ + Values tryOptimizingAt(size_t p, const Values &initial) const; + + /** + * Project from SO(p) to Rot2 or Rot3 + * Values should be of type SO(p) + */ + Values projectFrom(size_t p, const Values &values) const; + + /** + * Project from SO(p)^N to Rot2^N or Rot3^N + * Values should be of type SO(p) + */ + Values roundSolution(const Values &values) const; + + /// Lift Values of type T to SO(p) + template static Values LiftTo(size_t p, const Values &values) { + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SOn::Lift(p, it.value.matrix())); + } + return result; + } + + /// @} + /// @name Basic API + /// @{ + + /** + * Calculate cost for SO(3) + * Values should be of type Rot3 + */ + double cost(const Values &values) const; + + /** + * Initialize randomly at SO(d) + * @param rng random number generator + * Example: + * std::mt19937 rng(42); + * Values initial = initializeRandomly(rng, p); + */ + Values initializeRandomly(std::mt19937 &rng) const; + + /// Random initialization for wrapper, fixed random seed. + Values initializeRandomly() const; + + /** + * Optimize at different values of p until convergence. + * @param initial initial Rot3 values + * @param pMin value of p to start Riemanian staircase at (default: d). + * @param pMax maximum value of p to try (default: 10) + * @return (Rot3 values, minimum eigenvalue) + */ + std::pair run(const Values &initialEstimate, size_t pMin = d, + size_t pMax = 10) const; + /// @} +}; + +// Subclasses for d=2 and d=3 that explicitly instantiate, as well as provide a +// convenience interface with file access. + +class ShonanAveraging2 : public ShonanAveraging<2> { +public: + ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging2(string g2oFile, const Parameters ¶meters = Parameters()); +}; + +class ShonanAveraging3 : public ShonanAveraging<3> { +public: + ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging3(string g2oFile, const Parameters ¶meters = Parameters()); + + // TODO(frank): Deprecate after we land pybind wrapper + ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters = Parameters()); +}; +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.cpp b/gtsam/sfm/ShonanFactor.cpp new file mode 100644 index 000000000..b911fb5a4 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.cpp @@ -0,0 +1,141 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Various factors that minimize some Frobenius norm + */ + +#include + +#include +#include +#include + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +//****************************************************************************** +template +ShonanFactor::ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model, + const boost::shared_ptr &G) + : NoiseModelFactor2(ConvertNoiseModel(model, p * d), j1, j2), + M_(R12.matrix()), // d*d in all cases + p_(p), // 4 for SO(4) + pp_(p * p), // 16 for SO(4) + G_(G) { + if (noiseModel()->dim() != d * p_) + throw std::invalid_argument( + "ShonanFactor: model with incorrect dimension."); + if (!G) { + G_ = boost::make_shared(); + *G_ = SOn::VectorizedGenerators(p); // expensive! + } + if (static_cast(G_->rows()) != pp_ || + static_cast(G_->cols()) != SOn::Dimension(p)) + throw std::invalid_argument("ShonanFactor: passed in generators " + "of incorrect dimension."); +} + +//****************************************************************************** +template +void ShonanFactor::print(const std::string &s, + const KeyFormatter &keyFormatter) const { + std::cout << s << "ShonanFactor<" << p_ << ">(" << keyFormatter(key1()) << "," + << keyFormatter(key2()) << ")\n"; + traits::Print(M_, " M: "); + noiseModel_->print(" noise model: "); +} + +//****************************************************************************** +template +bool ShonanFactor::equals(const NonlinearFactor &expected, + double tol) const { + auto e = dynamic_cast(&expected); + return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + p_ == e->p_ && M_ == e->M_; +} + +//****************************************************************************** +template +void ShonanFactor::fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_Jacobians); + const size_t dim = p_ * d; // Stiefel manifold dimension + + if (H1) { + // If asked, calculate Jacobian H1 as as (M' \otimes M1) * G + // M' = dxd, M1 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (M' \otimes M1) is dim*dim, but last pp-dim columns are zero + *H1 = Matrix::Zero(dim, G_->cols()); + for (size_t j = 0; j < d; j++) { + auto MG_j = M1 * G_->middleRows(j * p_, p_); // p_ * Dim(p) + for (size_t i = 0; i < d; i++) { + H1->middleRows(i * p_, p_) -= M_(j, i) * MG_j; + } + } + } + if (H2) { + // If asked, calculate Jacobian H2 as as (I_d \otimes M2) * G + // I_d = dxd, M2 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (I_d \otimes M2) is dim*dim, but again last pp-dim columns are zero + H2->resize(dim, G_->cols()); + for (size_t i = 0; i < d; i++) { + H2->middleRows(i * p_, p_) = M2 * G_->middleRows(i * p_, p_); + } + } +} + +//****************************************************************************** +template +Vector ShonanFactor::evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_evaluateError); + + const Matrix &M1 = Q1.matrix(); + const Matrix &M2 = Q2.matrix(); + if (M1.rows() != static_cast(p_) || M2.rows() != static_cast(p_)) + throw std::invalid_argument("Invalid dimension SOn values passed to " + "ShonanFactor::evaluateError"); + + const size_t 
dim = p_ * d; // Stiefel manifold dimension + Vector fQ2(dim), hQ1(dim); + + // Vectorize and extract only d leftmost columns, i.e. vec(M2*P) + fQ2 << Eigen::Map(M2.data(), dim, 1); + + // Vectorize M1*P*R12 + const Matrix Q1PR12 = M1.leftCols() * M_; + hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); + + this->fillJacobians(M1, M2, H1, H2); + + return fQ2 - hQ1; +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template class ShonanFactor<2>; +template class ShonanFactor<3>; + +//****************************************************************************** + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.h b/gtsam/sfm/ShonanFactor.h new file mode 100644 index 000000000..3c43c2c52 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.h @@ -0,0 +1,91 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Main factor type in Shonan averaging, on SO(n) pairs + */ + +#pragma once + +#include +#include +#include +#include + +#include + +namespace gtsam { + +/** + * ShonanFactor is a BetweenFactor that moves in SO(p), but will + * land on the SO(d) sub-manifold of SO(p) at the global minimum. It projects + * the SO(p) matrices down to a Stiefel manifold of p*d matrices. + */ +template +class GTSAM_EXPORT ShonanFactor : public NoiseModelFactor2 { + Matrix M_; ///< measured rotation between R1 and R2 + size_t p_, pp_; ///< dimensionality constants + boost::shared_ptr G_; ///< matrix of vectorized generators + + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + +public: + /// @name Constructor + /// @{ + + /// Constructor. Note we convert to d*p-dimensional noise model. + /// To save memory and mallocs, pass in the vectorized Lie algebra generators: + /// G = boost::make_shared(SOn::VectorizedGenerators(p)); + ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model = nullptr, + const boost::shared_ptr &G = nullptr); + + /// @} + /// @name Testable + /// @{ + + /// print with optional string + void + print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; + + /// assert equality up to a tolerance + bool equals(const NonlinearFactor &expected, + double tol = 1e-9) const override; + + /// @} + /// @name NoiseModelFactor2 methods + /// @{ + + /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] + /// projects down from SO(p) to the Stiefel manifold of px3 matrices. 
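+  /// Concretely, the returned vector is vec(Q2*P) - vec(Q1*P*R12) and has
+  /// length p*d (see the implementation in ShonanFactor.cpp).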
+ Vector + evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override; + /// @} + +private: + /// Calculate Jacobians if asked, Only implemented for d=2 and 3 in .cpp + void fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const; +}; + +// Explicit instantiation for d=2 and d=3 in .cpp file: +using ShonanFactor2 = ShonanFactor<2>; +using ShonanFactor3 = ShonanFactor<3>; + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanGaugeFactor.h b/gtsam/sfm/ShonanGaugeFactor.h new file mode 100644 index 000000000..4847c5d58 --- /dev/null +++ b/gtsam/sfm/ShonanGaugeFactor.h @@ -0,0 +1,108 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanGaugeFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Factor used in Shonan Averaging to clamp down gauge freedom + */ + +#pragma once + +#include +#include +#include + +#include + +namespace gtsam { +/** + * The ShonanGaugeFactor creates a constraint on a single SO(n) to avoid moving + * in the stabilizer. + * + * Details: SO(p) contains the p*3 Stiefel matrices of orthogonal frames: we + * take those to be the 3 columns on the left. + * The P*P skew-symmetric matrices associated with so(p) can be partitioned as + * (Appendix B in the ECCV paper): + * | [w] -K' | + * | K [g] | + * where w is the SO(3) space, K are the extra Stiefel diemnsions (wormhole!) + * and (g)amma are extra dimensions in SO(p) that do not modify the cost + * function. The latter corresponds to rotations SO(p-d), and so the first few + * values of p-d for d==3 with their corresponding dimensionality are {0:0, 1:0, + * 2:1, 3:3, ...} + * + * The ShonanGaugeFactor adds a unit Jacobian to these extra dimensions, + * essentially restricting the optimization to the Stiefel manifold. + */ +class GTSAM_EXPORT ShonanGaugeFactor : public NonlinearFactor { + // Row dimension, equal to the dimensionality of SO(p-d) + size_t rows_; + + /// Constant Jacobian + boost::shared_ptr whitenedJacobian_; + +public: + /** + * Construct from key for an SO(p) matrix, for base dimension d (2 or 3) + * If parameter gamma is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(gamma). + */ + ShonanGaugeFactor(Key key, size_t p, size_t d = 3, + boost::optional gamma = boost::none) + : NonlinearFactor(boost::assign::cref_list_of<1>(key)) { + if (p < d) { + throw std::invalid_argument("ShonanGaugeFactor must have p>=d."); + } + // Calculate dimensions + size_t q = p - d; + size_t P = SOn::Dimension(p); // dimensionality of SO(p) + rows_ = SOn::Dimension(q); // dimensionality of SO(q), the gauge + + // Create constant Jacobian as a rows_*P matrix: there are rows_ penalized + // dimensions, but it is a bit tricky to find them among the P columns. + // The key is to look at how skew-symmetric matrices are laid out in SOn.h: + // the first tangent dimension will always be included, but beyond that we + // have to be careful. We always need to skip the d top-rows of the skew- + // symmetric matrix as they below to K, part of the Stiefel manifold. 
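+    // Worked example, for illustration only: with p=6, d=3 we get q=3, so
+    // rows_ = dim SO(3) = 3 and P = dim SO(6) = 15. The loop below starts at
+    // n = p-1-d = 2, placing a 2x2 identity block at (0,0), skipping d=3
+    // columns, then a 1x1 block at (2,5) -- the same pattern checked in
+    // testShonanGaugeFactor.cpp (GaugeFactorSO6).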
+ Matrix A(rows_, P); + A.setZero(); + double invSigma = gamma ? std::sqrt(*gamma) : 1.0; + size_t i = 0, j = 0, n = p - 1 - d; + while (i < rows_) { + A.block(i, j, n, n) = invSigma * Matrix::Identity(n, n); + i += n; + j += n + d; // skip d columns + n -= 1; + } + // TODO(frank): assign the right one in the right columns + whitenedJacobian_ = + boost::make_shared(key, A, Vector::Zero(rows_)); + } + + /// Destructor + virtual ~ShonanGaugeFactor() {} + + /// Calculate the error of the factor: always zero + double error(const Values &c) const override { return 0; } + + /// get the dimension of the factor (number of rows on linearization) + size_t dim() const override { return rows_; } + + /// linearize to a GaussianFactor + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; + } +}; +// \ShonanGaugeFactor + +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sfm/TranslationFactor.h b/gtsam/sfm/TranslationFactor.h index d63633d7e..d1f85cf01 100644 --- a/gtsam/sfm/TranslationFactor.h +++ b/gtsam/sfm/TranslationFactor.h @@ -36,6 +36,8 @@ namespace gtsam { * normalized(Tb - Ta) - w_aZb.point3() * * @addtogroup SFM + * + * */ class TranslationFactor : public NoiseModelFactor2 { private: diff --git a/gtsam/sfm/TranslationRecovery.cpp b/gtsam/sfm/TranslationRecovery.cpp index aeeae688f..665336cc4 100644 --- a/gtsam/sfm/TranslationRecovery.cpp +++ b/gtsam/sfm/TranslationRecovery.cpp @@ -10,10 +10,10 @@ * -------------------------------------------------------------------------- */ /** - * @file TranslationRecovery.h + * @file TranslationRecovery.cpp * @author Frank Dellaert * @date March 2020 - * @brief test recovering translations when rotations are given. + * @brief Source code for recovering translations when rotations are given */ #include @@ -33,30 +33,25 @@ using namespace gtsam; using namespace std; NonlinearFactorGraph TranslationRecovery::buildGraph() const { - auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01); NonlinearFactorGraph graph; // Add all relative translation edges for (auto edge : relativeTranslations_) { - Key a, b; - tie(a, b) = edge.first; - const Unit3 w_aZb = edge.second; - graph.emplace_shared(a, b, w_aZb, noiseModel); + graph.emplace_shared(edge.key1(), edge.key2(), + edge.measured(), edge.noiseModel()); } return graph; } void TranslationRecovery::addPrior(const double scale, - NonlinearFactorGraph* graph) const { - auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01); + NonlinearFactorGraph *graph) const { + //TODO(akshay-krishnan): make this an input argument + auto priorNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); auto edge = relativeTranslations_.begin(); - Key a, b; - tie(a, b) = edge->first; - const Unit3 w_aZb = edge->second; - graph->emplace_shared >(a, Point3(0, 0, 0), noiseModel); - graph->emplace_shared >(b, scale * w_aZb.point3(), - noiseModel); + graph->emplace_shared >(edge->key1(), Point3(0, 0, 0), priorNoiseModel); + graph->emplace_shared >(edge->key2(), scale * edge->measured().point3(), + edge->noiseModel()); } Values TranslationRecovery::initalizeRandomly() const { @@ -71,10 +66,8 @@ Values TranslationRecovery::initalizeRandomly() const { // Loop over measurements and add a random translation for (auto edge : relativeTranslations_) { - Key a, b; - tie(a, b) = edge.first; - insert(a); - insert(b); + insert(edge.key1()); + insert(edge.key2()); } return initial; } @@ -89,7 +82,8 @@ Values TranslationRecovery::run(const double scale) const { } TranslationRecovery::TranslationEdges 
TranslationRecovery::SimulateMeasurements( - const Values& poses, const vector& edges) { + const Values &poses, const vector &edges) { + auto edgeNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); TranslationEdges relativeTranslations; for (auto edge : edges) { Key a, b; @@ -97,7 +91,7 @@ TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( const Pose3 wTa = poses.at(a), wTb = poses.at(b); const Point3 Ta = wTa.translation(), Tb = wTb.translation(); const Unit3 w_aZb(Tb - Ta); - relativeTranslations[edge] = w_aZb; + relativeTranslations.emplace_back(a, b, w_aZb, edgeNoiseModel); } return relativeTranslations; } diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h index bb3c3cdb1..4c6a95cbe 100644 --- a/gtsam/sfm/TranslationRecovery.h +++ b/gtsam/sfm/TranslationRecovery.h @@ -13,15 +13,16 @@ * @file TranslationRecovery.h * @author Frank Dellaert * @date March 2020 - * @brief test recovering translations when rotations are given. + * @brief Recovering translations in an epipolar graph when rotations are given. */ #include #include #include +#include -#include #include +#include namespace gtsam { @@ -48,7 +49,7 @@ namespace gtsam { class TranslationRecovery { public: using KeyPair = std::pair; - using TranslationEdges = std::map; + using TranslationEdges = std::vector>; private: TranslationEdges relativeTranslations_; @@ -59,13 +60,14 @@ class TranslationRecovery { * @brief Construct a new Translation Recovery object * * @param relativeTranslations the relative translations, in world coordinate - * frames, indexed in a map by a pair of Pose keys. + * frames, vector of BinaryMeasurements of Unit3, where each key of a measurement + * is a point in 3D. * @param lmParams (optional) gtsam::LavenbergMarquardtParams that can be * used to modify the parameters for the LM optimizer. By default, uses the * default LM parameters. */ - TranslationRecovery(const TranslationEdges& relativeTranslations, - const LevenbergMarquardtParams& lmParams = LevenbergMarquardtParams()) + TranslationRecovery(const TranslationEdges &relativeTranslations, + const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams()) : relativeTranslations_(relativeTranslations), params_(lmParams) { params_.setVerbosityLM("Summary"); } @@ -83,7 +85,7 @@ class TranslationRecovery { * @param scale scale for first relative translation which fixes gauge. * @param graph factor graph to which prior is added. */ - void addPrior(const double scale, NonlinearFactorGraph* graph) const; + void addPrior(const double scale, NonlinearFactorGraph *graph) const; /** * @brief Create random initial translations. @@ -105,10 +107,11 @@ class TranslationRecovery { * * @param poses SE(3) ground truth poses stored as Values * @param edges pairs (a,b) for which a measurement w_aZb will be generated. - * @return TranslationEdges map from a KeyPair to the simulated Unit3 - * translation direction measurement between the cameras in KeyPair. + * @return TranslationEdges vector of binary measurements where the keys are + * the cameras and the measurement is the simulated Unit3 translation + * direction between the cameras. 
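+   * Example sketch (keys, poses, and scale below are placeholders, not part
+   * of the original documentation):
+   *   std::vector<KeyPair> edges = {{0, 1}, {1, 2}};
+   *   auto relative = TranslationRecovery::SimulateMeasurements(poses, edges);
+   *   TranslationRecovery recovery(relative);
+   *   Values translations = recovery.run(2.0); // scale of first edge fixes gauge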
*/ static TranslationEdges SimulateMeasurements( - const Values& poses, const std::vector& edges); + const Values &poses, const std::vector &edges); }; } // namespace gtsam diff --git a/gtsam/sfm/tests/testBinaryMeasurement.cpp b/gtsam/sfm/tests/testBinaryMeasurement.cpp new file mode 100644 index 000000000..3dd81c2c1 --- /dev/null +++ b/gtsam/sfm/tests/testBinaryMeasurement.cpp @@ -0,0 +1,70 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testBinaryMeasurement.cpp + * @brief Unit tests for BinaryMeasurement class + * @author Akshay Krishnan + * @date July 2020 + */ + +#include + +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +static const Key kKey1(2), kKey2(1); + +// Create noise models for unit3 and rot3 +static SharedNoiseModel unit3_model(noiseModel::Isotropic::Sigma(2, 0.05)); +static SharedNoiseModel rot3_model(noiseModel::Isotropic::Sigma(3, 0.05)); + +const Unit3 unit3Measured(Vector3(1, 1, 1)); +const Rot3 rot3Measured; + +TEST(BinaryMeasurement, Unit3) { + BinaryMeasurement unit3Measurement(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT_LONGS_EQUAL(unit3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(unit3Measurement.key2(), kKey2); + EXPECT(unit3Measurement.measured().equals(unit3Measured)); + + BinaryMeasurement unit3MeasurementCopy(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT(unit3Measurement.equals(unit3MeasurementCopy)); +} + +TEST(BinaryMeasurement, Rot3) { + // testing the accessors + BinaryMeasurement rot3Measurement(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT_LONGS_EQUAL(rot3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(rot3Measurement.key2(), kKey2); + EXPECT(rot3Measurement.measured().equals(rot3Measured)); + + // testing the equals function + BinaryMeasurement rot3MeasurementCopy(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT(rot3Measurement.equals(rot3MeasurementCopy)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanAveraging.cpp b/gtsam/sfm/tests/testShonanAveraging.cpp new file mode 100644 index 000000000..cc4319e15 --- /dev/null +++ b/gtsam/sfm/tests/testShonanAveraging.cpp @@ -0,0 +1,360 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for Shonan Averaging algorithm + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +template +using Rot = typename std::conditional::type; + +template +using Pose = typename std::conditional::type; + +ShonanAveraging3 fromExampleName( + const std::string &name, + ShonanAveraging3::Parameters parameters = ShonanAveraging3::Parameters()) { + string g2oFile = findExampleDataFile(name); + return ShonanAveraging3(g2oFile, parameters); +} + +static const ShonanAveraging3 kShonan = fromExampleName("toyExample.g2o"); + +static std::mt19937 kRandomNumberGenerator(42); + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkConstructor) { + EXPECT_LONGS_EQUAL(5, kShonan.nrUnknowns()); + + EXPECT_LONGS_EQUAL(15, kShonan.D().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.D().cols()); + auto D = kShonan.denseD(); + EXPECT_LONGS_EQUAL(15, D.rows()); + EXPECT_LONGS_EQUAL(15, D.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.Q().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.Q().cols()); + auto Q = kShonan.denseQ(); + EXPECT_LONGS_EQUAL(15, Q.rows()); + EXPECT_LONGS_EQUAL(15, Q.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.L().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.L().cols()); + auto L = kShonan.denseL(); + EXPECT_LONGS_EQUAL(15, L.rows()); + EXPECT_LONGS_EQUAL(15, L.cols()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, buildGraphAt) { + auto graph = kShonan.buildGraphAt(5); + EXPECT_LONGS_EQUAL(7, graph.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkOptimality) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! 
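+  // With 5 unknowns and d=3, Lambda is 15x15 and block-diagonal with five 3x3
+  // blocks, hence the 45 non-zeros checked below.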
+ auto Lambda = kShonan.computeLambda(random); + EXPECT_LONGS_EQUAL(15, Lambda.rows()); + EXPECT_LONGS_EQUAL(15, Lambda.cols()); + EXPECT_LONGS_EQUAL(45, Lambda.nonZeros()); + auto lambdaMin = kShonan.computeMinEigenValue(random); + // EXPECT_DOUBLES_EQUAL(-5.2964625490657866, lambdaMin, + // 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(-414.87376657555996, lambdaMin, + 1e-4); // Regression test + EXPECT(!kShonan.checkOptimality(random)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt3) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values initial = ShonanAveraging3::LiftTo(3, randomRotations); // convert to SOn + EXPECT(!kShonan.checkOptimality(initial)); + const Values result = kShonan.tryOptimizingAt(3, initial); + EXPECT(kShonan.checkOptimality(result)); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(3, result), 1e-4); + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt4) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! + const Values result = kShonan.tryOptimizingAt(4, random); + EXPECT(kShonan.checkOptimality(result)); + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(4, result), 1e-3); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, MakeATangentVector) { + Vector9 v; + v << 1, 2, 3, 4, 5, 6, 7, 8, 9; + Matrix expected(5, 5); + expected << 0, 0, 0, 0, -4, // + 0, 0, 0, 0, -5, // + 0, 0, 0, 0, -6, // + 0, 0, 0, 0, 0, // + 4, 5, 6, 0, 0; + const Vector xi_1 = ShonanAveraging3::MakeATangentVector(5, v, 1); + const auto actual = SOn::Hat(xi_1); + CHECK(assert_equal(expected, actual)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, LiftTo) { + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + Values lifted = ShonanAveraging3::LiftTo(5, initial); + EXPECT(assert_equal(SOn(5), lifted.at(0))); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, CheckWithEigen) { + // control randomness + static std::mt19937 rng(0); + Vector descentDirection = Vector::Random(15); // for use below + const Values randomRotations = kShonan.initializeRandomly(rng); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + + // Optimize + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + + // Compute Eigenvalue with Spectra solver + double lambda = kShonan.computeMinEigenValue(Qstar3); + + // Check Eigenvalue with slow Eigen version, converts matrix A to dense matrix! 
+ const Matrix S = ShonanAveraging3::StiefelElementMatrix(Qstar3); + auto A = kShonan.computeA(S); + bool computeEigenvectors = false; + Eigen::EigenSolver eigenSolver(Matrix(A), computeEigenvectors); + auto lambdas = eigenSolver.eigenvalues().real(); + double minEigenValue = lambdas(0); + for (int i = 1; i < lambdas.size(); i++) + minEigenValue = min(lambdas(i), minEigenValue); + + // Actual check + EXPECT_DOUBLES_EQUAL(minEigenValue, lambda, 1e-12); + + // Construct test descent direction (as minEigenVector is not predictable + // across platforms, being one from a basically flat 3d- subspace) + + // Check descent + Values initialQ4 = + ShonanAveraging3::LiftwithDescent(4, Qstar3, descentDirection); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); + + // TODO(frank): uncomment this regression test: currently not repeatable + // across platforms. + // Matrix expected(4, 4); + // expected << 0.0459224, -0.688689, -0.216922, 0.690321, // + // 0.92381, 0.191931, 0.255854, 0.21042, // + // -0.376669, 0.301589, 0.687953, 0.542111, // + // -0.0508588, 0.630804, -0.643587, 0.43046; + // EXPECT(assert_equal(SOn(expected), initialQ4.at(0), 1e-5)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, initializeWithDescent) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + Vector minEigenVector; + double lambdaMin = kShonan.computeMinEigenValue(Qstar3, &minEigenVector); + Values initialQ4 = + kShonan.initializeWithDescent(4, Qstar3, minEigenVector, lambdaMin); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, run) { + auto initial = kShonan.initializeRandomly(kRandomNumberGenerator); + auto result = kShonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(result.first), 1e-3); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, result.second, + 1e-4); // Regression test +} + +/* ************************************************************************* */ +namespace klaus { +// The data in the file is the Colmap solution +const Rot3 wR0(0.9992281076190063, -0.02676080288219576, -0.024497002638379624, + -0.015064701622500615); +const Rot3 wR1(0.998239108728862, -0.049543805396343954, -0.03232420352077356, + -0.004386230477751116); +const Rot3 wR2(0.9925378735259738, -0.07993768981394891, 0.0825062894866454, + -0.04088089479075661); +} // namespace klaus + +TEST(ShonanAveraging3, runKlaus) { + using namespace klaus; + + // Initialize a Shonan instance without the Karcher mean + ShonanAveraging3::Parameters parameters; + parameters.setKarcherWeight(0); + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = + fromExampleName("Klaus3.g2o", parameters); + + // Check nr poses + EXPECT_LONGS_EQUAL(3, shonan.nrUnknowns()); + + // Colmap uses the Y-down vision frame, and the first 3 rotations are close to + // identity. We check that below. Note tolerance is quite high. + static const Rot3 identity; + EXPECT(assert_equal(identity, wR0, 0.2)); + EXPECT(assert_equal(identity, wR1, 0.2)); + EXPECT(assert_equal(identity, wR2, 0.2)); + + // Get measurements + const Rot3 R01 = shonan.measured(0); + const Rot3 R12 = shonan.measured(1); + const Rot3 R02 = shonan.measured(2); + + // Regression test to make sure data did not change. 
+ EXPECT(assert_equal(Rot3(0.9995433591728293, -0.022048798853273946, + -0.01796327847857683, 0.010210006313668573), + R01)); + + // Check Colmap solution agrees OK with relative rotation measurements. + EXPECT(assert_equal(R01, wR0.between(wR1), 0.1)); + EXPECT(assert_equal(R12, wR1.between(wR2), 0.1)); + EXPECT(assert_equal(R02, wR0.between(wR2), 0.1)); + + // Run Shonan (with prior on first rotation) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-9.2259161494467889e-05, result.second, + 1e-4); // Regression + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + // rR0 = rRw * wR0 => rRw = rR0 * wR0.inverse() + // rR1 = rRw * wR1 + // rR2 = rRw * wR2 + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, runKlausKarcher) { + using namespace klaus; + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o"); + + // Run Shonan (with Karcher mean prior) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-1.361402670507772e-05, result.second, + 1e-4); // Regression test + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging2, runKlausKarcher) { + // Load 2D toy example + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + // lmParams.setVerbosityLM("SUMMARY"); + string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + ShonanAveraging2::Parameters parameters(lmParams); + auto measurements = parseMeasurements(g2oFile); + ShonanAveraging2 shonan(measurements, parameters); + EXPECT_LONGS_EQUAL(4, shonan.nrUnknowns()); + + // Check graph building + NonlinearFactorGraph graph = shonan.buildGraphAt(2); + EXPECT_LONGS_EQUAL(6, graph.size()); + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 2); + EXPECT_DOUBLES_EQUAL(0.0008211, shonan.cost(result.first), 1e-6); + EXPECT_DOUBLES_EQUAL(0, result.second, 1e-10); // certificate! +} + +/* ************************************************************************* */ +// Test alpha/beta/gamma prior weighting. 
+TEST(ShonanAveraging3, PriorWeights) { + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + ShonanAveraging3::Parameters params(lmParams); + EXPECT_DOUBLES_EQUAL(0, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(1, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(0, params.gamma, 1e-9); + double alpha = 100.0, beta = 200.0, gamma = 300.0; + params.setAnchorWeight(alpha); + params.setKarcherWeight(beta); + params.setGaugesWeight(gamma); + EXPECT_DOUBLES_EQUAL(alpha, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(beta, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(gamma, params.gamma, 1e-9); + params.setKarcherWeight(0); + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o", params); + for (auto i : {0,1,2}) { + const auto& m = shonan.measurement(i); + auto isotropic = + boost::static_pointer_cast(m.noiseModel()); + CHECK(isotropic != nullptr); + EXPECT_LONGS_EQUAL(3, isotropic->dim()); + EXPECT_DOUBLES_EQUAL(0.2, isotropic->sigma(), 1e-9); + } + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + EXPECT_DOUBLES_EQUAL(3.0756, shonan.cost(initial), 1e-4); + auto result = shonan.run(initial, 3, 3); + EXPECT_DOUBLES_EQUAL(0.0015, shonan.cost(result.first), 1e-4); +} +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanFactor.cpp b/gtsam/sfm/tests/testShonanFactor.cpp new file mode 100644 index 000000000..ef94c5cf4 --- /dev/null +++ b/gtsam/sfm/tests/testShonanFactor.cpp @@ -0,0 +1,121 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * testFrobeniusFactor.cpp + * + * @file testFrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Check evaluateError for various Frobenius norm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +//****************************************************************************** +namespace so3 { +SO3 id; +Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1); +Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace so3 + +//****************************************************************************** +namespace submanifold { +SO4 id; +Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1.tail<3>()); +SO4 Q1 = SO4::Expmap(v1); +Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2.tail<3>()); +SO4 Q2 = SO4::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace submanifold + +/* ************************************************************************* */ +TEST(ShonanFactor3, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(3, 1.2); + for (const size_t p : {5, 4, 3}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(3, 3) = submanifold::R1.matrix(); + SOn Q1(M); + M.topLeftCorner(3, 3) = submanifold::R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor3(1, 2, Rot3(::so3::R12.matrix()), p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +TEST(ShonanFactor3, equivalenceToSO3) { + using namespace ::submanifold; + auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); + auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension + auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); + auto factor4 = ShonanFactor3(1, 2, Rot3(R12.matrix()), 4, model); + const Matrix3 E3(factor3.evaluateError(R1, R2).data()); + const Matrix43 E4( + factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); + EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); + EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); +} + +/* ************************************************************************* */ +TEST(ShonanFactor2, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(1, 1.2); + const Rot2 R1(0.3), R2(0.5), R12(0.2); + for (const size_t p : {5, 4, 3, 2}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(2, 2) = R1.matrix(); + SOn Q1(M); + M.topLeftCorner(2, 2) = R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor2(1, 2, R12, p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git 
a/gtsam/sfm/tests/testShonanGaugeFactor.cpp b/gtsam/sfm/tests/testShonanGaugeFactor.cpp new file mode 100644 index 000000000..344394b9c --- /dev/null +++ b/gtsam/sfm/tests/testShonanGaugeFactor.cpp @@ -0,0 +1,106 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanGaugeFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for ShonanGaugeFactor class + */ + +#include + +#include + +#include +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +// Check dimensions of all low-dim GaugeFactors +TEST(ShonanAveraging, GaugeFactorLows) { + constexpr Key key(123); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 2, 2).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 2).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 4, 2).dim()); // SO(4-2) -> 1 + EXPECT_LONGS_EQUAL(3, ShonanGaugeFactor(key, 5, 2).dim()); // SO(5-2) -> 3 + + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 3).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 4, 3).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 5, 3).dim()); // SO(5-3) -> 1 +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6) +TEST(ShonanAveraging, GaugeFactorSO6) { + constexpr Key key(666); + ShonanGaugeFactor factor(key, 6); // For SO(6) + Matrix A = Matrix::Zero(3, 15); // SO(6-3) = SO(3) == 3-dimensional gauge + A(0, 0) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 1; // then we skip 3 tangent dimensions + A(2, 5) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(3)); + Values values; + EXPECT_LONGS_EQUAL(3, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(7) +TEST(ShonanAveraging, GaugeFactorSO7) { + constexpr Key key(777); + ShonanGaugeFactor factor(key, 7); // For SO(7) + Matrix A = Matrix::Zero(6, 21); // SO(7-3) = SO(4) == 6-dimensional gauge + A(0, 0) = 1; // first 3 of 7^th skew column, which has 6 non-zero entries + A(1, 1) = 1; + A(2, 2) = 1; // then we skip 3 tangent dimensions + A(3, 6) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(4, 7) = 1; // then we skip 3 tangent dimensions + A(5, 11) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. 
+ JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6), with base SO(2) +TEST(ShonanAveraging, GaugeFactorSO6over2) { + constexpr Key key(602); + double gamma = 4; + ShonanGaugeFactor factor(key, 6, 2, gamma); // For SO(6), base SO(2) + Matrix A = Matrix::Zero(6, 15); // SO(6-2) = SO(4) == 6-dimensional gauge + A(0, 0) = 2; // first 3 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 2; + A(2, 2) = 2; // then we skip only 2 tangent dimensions + A(3, 5) = 2; // first 2 of 5^th skew column, which has 4 non-zero entries + A(4, 6) = 2; // then we skip only 2 tangent dimensions + A(5, 9) = 2; // first of 4th skew colum, which has 3 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testTranslationFactor.cpp b/gtsam/sfm/tests/testTranslationFactor.cpp index 37e8b6c0f..3ff76ac5c 100644 --- a/gtsam/sfm/tests/testTranslationFactor.cpp +++ b/gtsam/sfm/tests/testTranslationFactor.cpp @@ -51,8 +51,6 @@ TEST(TranslationFactor, ZeroError) { // Verify we get the expected error Vector expected = (Vector3() << 0, 0, 0).finished(); EXPECT(assert_equal(expected, actualError, 1e-9)); - - } /* ************************************************************************* */ @@ -67,13 +65,13 @@ TEST(TranslationFactor, NonZeroError) { Vector actualError(factor.evaluateError(T1, T2)); // verify we get the expected error - Vector expected = (Vector3() << -1, 1/sqrt(2), 1/sqrt(2)).finished(); + Vector expected = (Vector3() << -1, 1 / sqrt(2), 1 / sqrt(2)).finished(); EXPECT(assert_equal(expected, actualError, 1e-9)); } /* ************************************************************************* */ -Vector factorError(const Point3& T1, const Point3& T2, - const TranslationFactor& factor) { +Vector factorError(const Point3 &T1, const Point3 &T2, + const TranslationFactor &factor) { return factor.evaluateError(T1, T2); } diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index 0f5aa6a4c..a594e95d5 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -21,6 +21,14 @@ #include #include +#ifdef _WIN32 +#define BETWEENFACTOR_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define BETWEENFACTOR_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** @@ -67,9 +75,11 @@ namespace gtsam { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } - /** implement functions needed for Testable */ + /// @} + /// @name Testable + /// @{ - /** print */ + /// print with optional string void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "BetweenFactor(" << keyFormatter(this->key1()) << "," @@ -78,15 +88,17 @@ namespace gtsam { 
this->noiseModel_->print(" noise model: "); } - /** equals */ + /// assert equality up to a tolerance bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) && traits::Equals(this->measured_, e->measured_, tol); } - /** implement functions needed to derive from Factor */ + /// @} + /// @name NoiseModelFactor2 methods + /// @{ - /** vector of errors */ + /// evaluate error, returns vector of errors size of tangent space Vector evaluateError(const T& p1, const T& p2, boost::optional H1 = boost::none, boost::optional H2 = boost::none) const override { T hx = traits::Between(p1, p2, H1, H2); // h(x) @@ -102,15 +114,15 @@ namespace gtsam { #endif } - /** return the measured */ + /// @} + /// @name Standard interface + /// @{ + + /// return the measurement const VALUE& measured() const { return measured_; } - - /** number of variables attached to this factor */ - std::size_t size() const { - return 2; - } + /// @} private: diff --git a/gtsam/slam/EssentialMatrixConstraint.h b/gtsam/slam/EssentialMatrixConstraint.h index 943db7207..d21ead31f 100644 --- a/gtsam/slam/EssentialMatrixConstraint.h +++ b/gtsam/slam/EssentialMatrixConstraint.h @@ -87,11 +87,6 @@ public: return measuredE_; } - /** number of variables attached to this factor */ - std::size_t size() const { - return 2; - } - private: /** Serialization function */ diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp index 904addb03..5697a0cd6 100644 --- a/gtsam/slam/FrobeniusFactor.cpp +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -18,100 +18,38 @@ #include -#include -#include -#include - -#include -#include -#include - using namespace std; namespace gtsam { //****************************************************************************** -boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit) { +boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t d, bool defaultToUnit) { double sigma = 1.0; if (model != nullptr) { - if (model->dim() != 6) { - if (!defaultToUnit) - throw std::runtime_error("Can only convert Pose3 noise models"); - } else { - auto sigmas = model->sigmas().head(3).eval(); - if (sigmas(1) != sigmas(0) || sigmas(2) != sigmas(0)) { - if (!defaultToUnit) + auto sigmas = model->sigmas(); + size_t n = sigmas.size(); + if (n == 1) { + sigma = sigmas(0); // Rot2 + goto exit; + } + if (n == 3 || n == 6) { + sigma = sigmas(2); // Pose2, Rot3, or Pose3 + if (sigmas(0) != sigma || sigmas(1) != sigma) { + if (!defaultToUnit) { throw std::runtime_error("Can only convert isotropic rotation noise"); - } else { - sigma = sigmas(0); + } } + goto exit; + } + if (!defaultToUnit) { + throw std::runtime_error("Can only convert Pose2/Pose3 noise models"); } } +exit: return noiseModel::Isotropic::Sigma(d, sigma); } //****************************************************************************** -FrobeniusWormholeFactor::FrobeniusWormholeFactor(Key j1, Key j2, const Rot3& R12, - size_t p, - const SharedNoiseModel& model) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, p * 3), j1, j2), - M_(R12.matrix()), // 3*3 in all cases - p_(p), // 4 for SO(4) - pp_(p * p), // 16 for SO(4) - dimension_(SOn::Dimension(p)), // 6 for SO(4) - G_(pp_, dimension_) // 16*6 for SO(4) -{ - // Calculate G matrix of vectorized generators - Matrix Z = Matrix::Zero(p, p); - for (size_t j = 0; j < dimension_; j++) { - const auto X = SOn::Hat(Eigen::VectorXd::Unit(dimension_, 
j)); - G_.col(j) = Eigen::Map(X.data(), pp_, 1); - } - assert(noiseModel()->dim() == 3 * p_); -} -//****************************************************************************** -Vector FrobeniusWormholeFactor::evaluateError( - const SOn& Q1, const SOn& Q2, boost::optional H1, - boost::optional H2) const { - gttic(FrobeniusWormholeFactorP_evaluateError); - - const Matrix& M1 = Q1.matrix(); - const Matrix& M2 = Q2.matrix(); - assert(M1.rows() == p_ && M2.rows() == p_); - - const size_t dim = 3 * p_; // Stiefel manifold dimension - Vector fQ2(dim), hQ1(dim); - - // Vectorize and extract only d leftmost columns, i.e. vec(M2*P) - fQ2 << Eigen::Map(M2.data(), dim, 1); - - // Vectorize M1*P*R12 - const Matrix Q1PR12 = M1.leftCols<3>() * M_; - hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); - - // If asked, calculate Jacobian as (M \otimes M1) * G - if (H1) { - const size_t p2 = 2 * p_; - Matrix RPxQ = Matrix::Zero(dim, pp_); - RPxQ.block(0, 0, p_, dim) << M1 * M_(0, 0), M1 * M_(1, 0), M1 * M_(2, 0); - RPxQ.block(p_, 0, p_, dim) << M1 * M_(0, 1), M1 * M_(1, 1), M1 * M_(2, 1); - RPxQ.block(p2, 0, p_, dim) << M1 * M_(0, 2), M1 * M_(1, 2), M1 * M_(2, 2); - *H1 = -RPxQ * G_; - } - if (H2) { - const size_t p2 = 2 * p_; - Matrix PxQ = Matrix::Zero(dim, pp_); - PxQ.block(0, 0, p_, p_) = M2; - PxQ.block(p_, p_, p_, p_) = M2; - PxQ.block(p2, p2, p_, p_) = M2; - *H2 = PxQ * G_; - } - - return fQ2 - hQ1; -} - -//****************************************************************************** - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/FrobeniusFactor.h b/gtsam/slam/FrobeniusFactor.h index 6b2ef67fc..1fc37c785 100644 --- a/gtsam/slam/FrobeniusFactor.h +++ b/gtsam/slam/FrobeniusFactor.h @@ -18,6 +18,7 @@ #pragma once +#include #include #include #include @@ -25,23 +26,24 @@ namespace gtsam { /** - * When creating (any) FrobeniusFactor we convert a 6-dimensional Pose3 - * BetweenFactor noise model into an 9 or 16-dimensional isotropic noise + * When creating (any) FrobeniusFactor we can convert a Rot/Pose + * BetweenFactor noise model into a n-dimensional isotropic noise * model used to weight the Frobenius norm. If the noise model passed is - * null we return a Dim-dimensional isotropic noise model with sigma=1.0. If - * not, we we check if the 3-dimensional noise model on rotations is - * isotropic. If it is, we extend to 'Dim' dimensions, otherwise we throw an + * null we return a n-dimensional isotropic noise model with sigma=1.0. If + * not, we we check if the d-dimensional noise model on rotations is + * isotropic. If it is, we extend to 'n' dimensions, otherwise we throw an * error. If defaultToUnit == false throws an exception on unexepcted input. */ - GTSAM_EXPORT boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit = true); +GTSAM_EXPORT boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t n, + bool defaultToUnit = true); /** * FrobeniusPrior calculates the Frobenius norm between a given matrix and an * element of SO(3) or SO(4). 
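 * Example sketch (key 1 and the 3x3 target matrix M are hypothetical):
 *   FrobeniusPrior<SO3> prior(1, M);  // penalizes ||R.matrix() - M||_F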
*/ template -class FrobeniusPrior : public NoiseModelFactor1 { +class GTSAM_EXPORT FrobeniusPrior : public NoiseModelFactor1 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; using MatrixNN = typename Rot::MatrixNN; Eigen::Matrix vecM_; ///< vectorized matrix to approximate @@ -50,7 +52,7 @@ class FrobeniusPrior : public NoiseModelFactor1 { /// Constructor FrobeniusPrior(Key j, const MatrixNN& M, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor1(ConvertPose3NoiseModel(model, Dim), j) { + : NoiseModelFactor1(ConvertNoiseModel(model, Dim), j) { vecM_ << Eigen::Map(M.data(), Dim, 1); } @@ -66,13 +68,13 @@ class FrobeniusPrior : public NoiseModelFactor1 { * The template argument can be any fixed-size SO. */ template -class FrobeniusFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT FrobeniusFactor : public NoiseModelFactor2 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; public: /// Constructor FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, Dim), j1, + : NoiseModelFactor2(ConvertNoiseModel(model, Dim), j1, j2) {} /// Error is just Frobenius norm between rotation matrices. @@ -92,21 +94,51 @@ class FrobeniusFactor : public NoiseModelFactor2 { * and in fact only SO3 and SO4 really work, as we need SO::AdjointMap. */ template -class FrobeniusBetweenFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { Rot R12_; ///< measured rotation between R1 and R2 Eigen::Matrix R2hat_H_R1_; ///< fixed derivative of R2hat wrpt R1 enum { Dim = Rot::VectorN2::RowsAtCompileTime }; public: - /// Constructor + /// @name Constructor + /// @{ + + /// Construct from two keys and measured rotation FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12, const SharedNoiseModel& model = nullptr) : NoiseModelFactor2( - ConvertPose3NoiseModel(model, Dim), j1, j2), + ConvertNoiseModel(model, Dim), j1, j2), R12_(R12), R2hat_H_R1_(R12.inverse().AdjointMap()) {} + /// @} + /// @name Testable + /// @{ + + /// print with optional string + void + print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override { + std::cout << s << "FrobeniusBetweenFactor<" << demangle(typeid(Rot).name()) + << ">(" << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n"; + traits::Print(R12_, " R12: "); + this->noiseModel_->print(" noise model: "); + } + + /// assert equality up to a tolerance + bool equals(const NonlinearFactor &expected, + double tol = 1e-9) const override { + auto e = dynamic_cast(&expected); + return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + traits::Equals(this->R12_, e->R12_, tol); + } + + /// @} + /// @name NoiseModelFactor2 methods + /// @{ + /// Error is Frobenius norm between R1*R12 and R2. Vector evaluateError(const Rot& R1, const Rot& R2, boost::optional H1 = boost::none, @@ -117,29 +149,7 @@ class FrobeniusBetweenFactor : public NoiseModelFactor2 { if (H1) *H1 = -vec_H_R2hat * R2hat_H_R1_; return error; } -}; - -/** - * FrobeniusWormholeFactor is a BetweenFactor that moves in SO(p), but will - * land on the SO(3) sub-manifold of SO(p) at the global minimum. It projects - * the SO(p) matrices down to a Stiefel manifold of p*d matrices. 
- * TODO(frank): template on D=2 or 3 - */ -class GTSAM_EXPORT FrobeniusWormholeFactor : public NoiseModelFactor2 { - Matrix M_; ///< measured rotation between R1 and R2 - size_t p_, pp_, dimension_; ///< dimensionality constants - Matrix G_; ///< matrix of vectorized generators - - public: - /// Constructor. Note we convert to 3*p-dimensional noise model. - FrobeniusWormholeFactor(Key j1, Key j2, const Rot3& R12, size_t p = 4, - const SharedNoiseModel& model = nullptr); - - /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] - /// projects down from SO(p) to the Stiefel manifold of px3 matrices. - Vector evaluateError(const SOn& Q1, const SOn& Q2, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const override; + /// @} }; } // namespace gtsam diff --git a/gtsam/slam/KarcherMeanFactor-inl.h b/gtsam/slam/KarcherMeanFactor-inl.h index f10cc7e42..c81a9adc5 100644 --- a/gtsam/slam/KarcherMeanFactor-inl.h +++ b/gtsam/slam/KarcherMeanFactor-inl.h @@ -58,20 +58,22 @@ T FindKarcherMean(std::initializer_list&& rotations) { template template -KarcherMeanFactor::KarcherMeanFactor(const CONTAINER& keys, int d) - : NonlinearFactor(keys) { +KarcherMeanFactor::KarcherMeanFactor(const CONTAINER &keys, int d, + boost::optional beta) + : NonlinearFactor(keys), d_(static_cast(d)) { if (d <= 0) { throw std::invalid_argument( "KarcherMeanFactor needs dimension for dynamic types."); } - // Create the constant Jacobian made of D*D identity matrices, - // where D is the dimensionality of the manifold. - const auto I = Eigen::MatrixXd::Identity(d, d); + // Create the constant Jacobian made of d*d identity matrices, + // where d is the dimensionality of the manifold. + Matrix A = Matrix::Identity(d, d); + if (beta) A *= std::sqrt(*beta); std::map terms; for (Key j : keys) { - terms[j] = I; + terms[j] = A; } - jacobian_ = - boost::make_shared(terms, Eigen::VectorXd::Zero(d)); + whitenedJacobian_ = + boost::make_shared(terms, Vector::Zero(d)); } } // namespace gtsam \ No newline at end of file diff --git a/gtsam/slam/KarcherMeanFactor.h b/gtsam/slam/KarcherMeanFactor.h index 54b3930d4..b7cd3b11a 100644 --- a/gtsam/slam/KarcherMeanFactor.h +++ b/gtsam/slam/KarcherMeanFactor.h @@ -30,44 +30,51 @@ namespace gtsam { * PriorFactors. */ template -T FindKarcherMean(const std::vector>& rotations); +T FindKarcherMean(const std::vector> &rotations); -template -T FindKarcherMean(std::initializer_list&& rotations); +template T FindKarcherMean(std::initializer_list &&rotations); /** * The KarcherMeanFactor creates a constraint on all SO(n) variables with * given keys that the Karcher mean (see above) will stay the same. Note the * mean itself is irrelevant to the constraint and is not a parameter: the * constraint is implemented as enforcing that the sum of local updates is - * equal to zero, hence creating a rank-dim constraint. Note it is implemented as - * a soft constraint, as typically it is used to fix a gauge freedom. + * equal to zero, hence creating a rank-dim constraint. Note it is implemented + * as a soft constraint, as typically it is used to fix a gauge freedom. 
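 * Example sketch (assuming `keys` holds Rot3 variables):
 *   KarcherMeanFactor<Rot3> factor(keys);           // unit weight
 *   KarcherMeanFactor<Rot3> weighted(keys, 3, 2.0); // precision beta = 2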
* */ -template -class KarcherMeanFactor : public NonlinearFactor { +template class KarcherMeanFactor : public NonlinearFactor { + // Compile time dimension: can be -1 + enum { D = traits::dimension }; + + // Runtime dimension: always >=0 + size_t d_; + /// Constant Jacobian made of d*d identity matrices - boost::shared_ptr jacobian_; + boost::shared_ptr whitenedJacobian_; - enum {D = traits::dimension}; - - public: - /// Construct from given keys. +public: + /** + * Construct from given keys. + * If parameter beta is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(beta). + */ template - KarcherMeanFactor(const CONTAINER& keys, int d=D); + KarcherMeanFactor(const CONTAINER &keys, int d = D, + boost::optional beta = boost::none); /// Destructor virtual ~KarcherMeanFactor() {} /// Calculate the error of the factor: always zero - double error(const Values& c) const override { return 0; } + double error(const Values &c) const override { return 0; } /// get the dimension of the factor (number of rows on linearization) - size_t dim() const override { return D; } + size_t dim() const override { return d_; } /// linearize to a GaussianFactor - boost::shared_ptr linearize(const Values& c) const override { - return jacobian_; + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; } }; // \KarcherMeanFactor -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/SmartProjectionFactor.h b/gtsam/slam/SmartProjectionFactor.h index a7d85dd08..bc01f5d85 100644 --- a/gtsam/slam/SmartProjectionFactor.h +++ b/gtsam/slam/SmartProjectionFactor.h @@ -443,25 +443,6 @@ public: /** return the farPoint state */ bool isFarPoint() const { return result_.farPoint(); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - // It does not make sense to optimize for a camera where the pose would not be - // the actual pose of the camera. An unfortunate consequence of deprecating - // this constructor means that we cannot optimize for calibration when the - // camera is offset from the body pose. That would need a new factor with - // (body) pose and calibration as variables. However, that use case is - // unlikely: when a global offset is know, calibration is typically known. 
- SmartProjectionFactor( - const SharedNoiseModel& sharedNoiseModel, - const boost::optional body_P_sensor, - const SmartProjectionParams& params = SmartProjectionParams()) - : Base(sharedNoiseModel, body_P_sensor), - params_(params), - result_(TriangulationResult::Degenerate()) {} - /// @} -#endif - private: /// Serialization function diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 4a95b4297..d7b067d70 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -21,21 +21,25 @@ #include #include #include + #include #include #include + +#include +#include + +#include + #include #include -#include -#include -#include -#include + #include #include #include -#include #include #include +#include #include #include @@ -49,14 +53,16 @@ using namespace std; namespace fs = boost::filesystem; -using namespace gtsam::symbol_shorthand; +using gtsam::symbol_shorthand::L; +using gtsam::symbol_shorthand::P; +using gtsam::symbol_shorthand::X; #define LINESIZE 81920 namespace gtsam { /* ************************************************************************* */ -string findExampleDataFile(const string& name) { +string findExampleDataFile(const string &name) { // Search source tree and installed location vector rootsToSearch; @@ -73,88 +79,174 @@ string findExampleDataFile(const string& name) { namesToSearch.push_back(name + ".xml"); // Find first name that exists - for(const fs::path& root: rootsToSearch) { - for(const fs::path& name: namesToSearch) { + for (const fs::path root : rootsToSearch) { + for (const fs::path name : namesToSearch) { if (fs::is_regular_file(root / name)) return (root / name).string(); } } // If we did not return already, then we did not find the file - throw invalid_argument( - "gtsam::findExampleDataFile could not find a matching file in\n" - GTSAM_SOURCE_TREE_DATASET_DIR " or\n" - GTSAM_INSTALLED_DATASET_DIR " named\n" + name + ", " + name - + ".graph, or " + name + ".txt"); + throw invalid_argument("gtsam::findExampleDataFile could not find a matching " + "file in\n" GTSAM_SOURCE_TREE_DATASET_DIR + " or\n" GTSAM_INSTALLED_DATASET_DIR " named\n" + + name + ", " + name + ".graph, or " + name + ".txt"); } /* ************************************************************************* */ -string createRewrittenFileName(const string& name) { +string createRewrittenFileName(const string &name) { // Search source tree and installed location if (!exists(fs::path(name))) { throw invalid_argument( - "gtsam::createRewrittenFileName could not find a matching file in\n" - + name); + "gtsam::createRewrittenFileName could not find a matching file in\n" + + name); } fs::path p(name); - fs::path newpath = fs::path(p.parent_path().string()) - / fs::path(p.stem().string() + "-rewritten.txt"); + fs::path newpath = fs::path(p.parent_path().string()) / + fs::path(p.stem().string() + "-rewritten.txt"); return newpath.string(); } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, int maxID, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { - return load2D(dataset.first, dataset.second, maxID, addNoise, smart, - noiseFormat, kernelFunctionType); +// Type for parser functions used in parseLines below. +template +using Parser = + std::function(istream &is, const string &tag)>; + +// Parse a file by calling the parse(is, tag) function for every line. 
+// The result of calling the function is ignored, so typically parse function +// works via a side effect, like adding a factor into a graph etc. +template +static void parseLines(const string &filename, Parser parse) { + ifstream is(filename.c_str()); + if (!is) + throw invalid_argument("parse: can not find file " + filename); + string tag; + while (is >> tag) { + parse(is, tag); // ignore return value + is.ignore(LINESIZE, '\n'); + } } /* ************************************************************************* */ -// Read noise parameters and interpret them according to flags -static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, - NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - double v1, v2, v3, v4, v5, v6; - is >> v1 >> v2 >> v3 >> v4 >> v5 >> v6; +// Parse types T into a size_t-indexed map +template +map parseToMap(const string &filename, Parser> parse, + size_t maxIndex) { + map result; + Parser> emplace = [&](istream &is, const string &tag) { + if (auto t = parse(is, tag)) { + if (!maxIndex || t->first <= maxIndex) + result.emplace(*t); + } + return boost::none; + }; + parseLines(filename, emplace); + return result; +} +/* ************************************************************************* */ +// Parse a file and push results on a vector +template +static vector parseToVector(const string &filename, Parser parse) { + vector result; + Parser add = [&result, parse](istream &is, const string &tag) { + if (auto t = parse(is, tag)) + result.push_back(*t); + return boost::none; + }; + parseLines(filename, add); + return result; +} + +/* ************************************************************************* */ +boost::optional parseVertexPose(istream &is, const string &tag) { + if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { + size_t id; + double x, y, yaw; + if (!(is >> id >> x >> y >> yaw)) { + throw std::runtime_error("parseVertexPose encountered malformed line"); + } + return IndexedPose(id, Pose2(x, y, yaw)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose, maxIndex); +} + +/* ************************************************************************* */ +boost::optional parseVertexLandmark(istream &is, + const string &tag) { + if (tag == "VERTEX_XY") { + size_t id; + double x, y; + if (!(is >> id >> x >> y)) { + throw std::runtime_error( + "parseVertexLandmark encountered malformed line"); + } + return IndexedLandmark(id, Point2(x, y)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexLandmark, maxIndex); +} + +/* ************************************************************************* */ +// Interpret noise parameters according to flags +static SharedNoiseModel +createNoiseModel(const Vector6 v, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { if (noiseFormat == NoiseFormatAUTO) { // Try to guess covariance matrix layout - if (v1 != 0.0 && v2 == 0.0 && v3 != 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 == 0.0) { - // NoiseFormatGRAPH + if (v(0) != 0.0 && v(1) == 0.0 && v(2) != 0.0 && // + v(3) != 0.0 && v(4) == 0.0 && v(5) == 0.0) { noiseFormat = NoiseFormatGRAPH; - } else if (v1 != 0.0 && v2 == 0.0 && v3 == 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 != 0.0) { - // NoiseFormatCOV + } else if (v(0) != 0.0 && v(1) == 0.0 && v(2) == 0.0 && // + v(3) != 0.0 
&& v(4) == 0.0 && v(5) != 0.0) { noiseFormat = NoiseFormatCOV; } else { throw std::invalid_argument( - "load2D: unrecognized covariance matrix format in dataset file. Please specify the noise format."); + "load2D: unrecognized covariance matrix format in dataset file. " + "Please specify the noise format."); } } // Read matrix and check that diagonal entries are non-zero - Matrix M(3, 3); + Matrix3 M; switch (noiseFormat) { case NoiseFormatG2O: case NoiseFormatCOV: - // i.e., [ v1 v2 v3; v2' v4 v5; v3' v5' v6 ] - if (v1 == 0.0 || v4 == 0.0 || v6 == 0.0) + // i.e., [v(0) v(1) v(2); + // v(1)' v(3) v(4); + // v(2)' v(4)' v(5) ] + if (v(0) == 0.0 || v(3) == 0.0 || v(5) == 0.0) throw runtime_error( "load2D::readNoiseModel looks like this is not G2O matrix order"); - M << v1, v2, v3, v2, v4, v5, v3, v5, v6; + M << v(0), v(1), v(2), v(1), v(3), v(4), v(2), v(4), v(5); break; case NoiseFormatTORO: case NoiseFormatGRAPH: // http://www.openslam.org/toro.html // inf_ff inf_fs inf_ss inf_rr inf_fr inf_sr - // i.e., [ v1 v2 v5; v2' v3 v6; v5' v6' v4 ] - if (v1 == 0.0 || v3 == 0.0 || v4 == 0.0) + // i.e., [v(0) v(1) v(4); + // v(1)' v(2) v(5); + // v(4)' v(5)' v(3) ] + if (v(0) == 0.0 || v(2) == 0.0 || v(3) == 0.0) throw invalid_argument( "load2D::readNoiseModel looks like this is not TORO matrix order"); - M << v1, v2, v5, v2, v3, v6, v5, v6, v4; + M << v(0), v(1), v(4), v(1), v(2), v(5), v(4), v(5), v(3); break; default: throw runtime_error("load2D: invalid noise format"); @@ -195,363 +287,445 @@ static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, } } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /* ************************************************************************* */ -boost::optional parseVertex(istream& is, const string& tag) { - return parseVertexPose(is, tag); -} -#endif +boost::optional parseEdge(istream &is, const string &tag) { + if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || + (tag == "ODOMETRY")) { -/* ************************************************************************* */ -boost::optional parseVertexPose(istream& is, const string& tag) { - if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { - Key id; + size_t id1, id2; double x, y, yaw; - is >> id >> x >> y >> yaw; - return IndexedPose(id, Pose2(x, y, yaw)); + if (!(is >> id1 >> id2 >> x >> y >> yaw)) { + throw std::runtime_error("parseEdge encountered malformed line"); + } + return IndexedEdge({id1, id2}, Pose2(x, y, yaw)); } else { return boost::none; } } /* ************************************************************************* */ -boost::optional parseVertexLandmark(istream& is, const string& tag) { - if (tag == "VERTEX_XY") { - Key id; - double x, y; - is >> id >> x >> y; - return IndexedLandmark(id, Point2(x, y)); - } else { - return boost::none; +// Measurement parsers are implemented as a functor, as they have several +// options to filter and create the measurement model. 
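// Specializations below cover Pose2, the 2D bearing-range type BearingRange2D,
// and Pose3; each returns boost::none for tags it does not handle.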
+template struct ParseMeasurement; + +/* ************************************************************************* */ +// Converting from Measurement to BetweenFactor is generic +template struct ParseFactor : ParseMeasurement { + ParseFactor(const ParseMeasurement &parent) + : ParseMeasurement(parent) {} + + // We parse a measurement then convert + typename boost::optional::shared_ptr> + operator()(istream &is, const string &tag) { + if (auto m = ParseMeasurement::operator()(is, tag)) + return boost::make_shared>( + m->key1(), m->key2(), m->measured(), m->noiseModel()); + else + return boost::none; } +}; + +/* ************************************************************************* */ +// Pose2 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + size_t maxIndex; + + // For noise model creation + bool smart; + NoiseFormat noiseFormat; + KernelFunctionType kernelFunctionType; + + // If this is not null, will use instead of parsed model: + SharedNoiseModel model; + + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + auto edge = parseEdge(is, tag); + if (!edge) + return boost::none; + + // parse noise model + Vector6 v; + is >> v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); + + // optional filter + size_t id1, id2; + tie(id1, id2) = edge->first; + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) + return boost::none; + + // Get pose and optionally add noise + Pose2 &pose = edge->second; + if (sampler) + pose = pose.retract(sampler->sample()); + + // emplace measurement + auto modelFromFile = + createNoiseModel(v, smart, noiseFormat, kernelFunctionType); + return BinaryMeasurement(id1, id2, pose, + model ? model : modelFromFile); + } +}; + +/* ************************************************************************* */ +// Create a sampler to corrupt a measurement +boost::shared_ptr createSampler(const SharedNoiseModel &model) { + auto noise = boost::dynamic_pointer_cast(model); + if (!noise) + throw invalid_argument("gtsam::load: invalid noise model for adding noise" + "(current version assumes diagonal noise model)!"); + return boost::shared_ptr(new Sampler(noise)); } /* ************************************************************************* */ -boost::optional parseEdge(istream& is, const string& tag) { - if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") - || (tag == "ODOMETRY")) { - - Key id1, id2; - double x, y, yaw; - is >> id1 >> id2 >> x >> y >> yaw; - return IndexedEdge(pair(id1, id2), Pose2(x, y, yaw)); - } else { - return boost::none; - } +// Implementation of parseMeasurements for Pose2 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? 
createSampler(model) : nullptr, + maxIndex, true, NoiseFormatAUTO, + KernelFunctionTypeNONE}; + return parseToVector>(filename, parse); } /* ************************************************************************* */ -GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { +// Implementation of parseMeasurements for Rot2, which converts from Pose2 - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("load2D: can not find file " + filename); +// Extract Rot2 measurement from Pose2 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose2 measurements " + "with Gaussian noise models."); + const Matrix3 M = gaussian->covariance(); + return BinaryMeasurement(p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Isotropic::Variance(1, M(2, 2))); +} - Values::shared_ptr initial(new Values); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} - string tag; +/* ************************************************************************* */ +// Implementation of parseFactors for Pose2 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? 
createSampler(model) : nullptr, maxIndex, + true, NoiseFormatAUTO, KernelFunctionTypeNONE, + nullptr}); + return parseToVector::shared_ptr>(filename, parse); +} - // load the poses and landmarks - while (!is.eof()) { - if (!(is >> tag)) - break; +/* ************************************************************************* */ +using BearingRange2D = BearingRange; - const auto indexed_pose = parseVertexPose(is, tag); - if (indexed_pose) { - Key id = indexed_pose->first; +template <> struct ParseMeasurement { + // arguments + size_t maxIndex; - // optional filter - if (maxID && id >= maxID) - continue; + // The actual parser + boost::optional> + operator()(istream &is, const string &tag) { + if (tag != "BR" && tag != "LANDMARK") + return boost::none; - initial->insert(id, indexed_pose->second); - } - const auto indexed_landmark = parseVertexLandmark(is, tag); - if (indexed_landmark) { - Key id = indexed_landmark->first; - - // optional filter - if (maxID && id >= maxID) - continue; - - initial->insert(id, indexed_landmark->second); - } - is.ignore(LINESIZE, '\n'); - } - is.clear(); /* clears the end-of-file and error flags */ - is.seekg(0, ios::beg); - - // If asked, create a sampler with random number generator - std::unique_ptr sampler; - if (addNoise) { - noiseModel::Diagonal::shared_ptr noise; - if (model) - noise = boost::dynamic_pointer_cast(model); - if (!noise) - throw invalid_argument( - "gtsam::load2D: invalid noise model for adding noise" - "(current version assumes diagonal noise model)!"); - sampler.reset(new Sampler(noise)); - } - - // Parse the pose constraints - Key id1, id2; - bool haveLandmark = false; - const bool useModelInFile = !model; - while (!is.eof()) { - if (!(is >> tag)) - break; - - auto between_pose = parseEdge(is, tag); - if (between_pose) { - std::tie(id1, id2) = between_pose->first; - Pose2& l1Xl2 = between_pose->second; - - // read noise model - SharedNoiseModel modelInFile = readNoiseModel(is, smart, noiseFormat, - kernelFunctionType); - - // optional filter - if (maxID && (id1 >= maxID || id2 >= maxID)) - continue; - - if (useModelInFile) - model = modelInFile; - - if (addNoise) - l1Xl2 = l1Xl2.retract(sampler->sample()); - - // Insert vertices if pure odometry file - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(id2)) - initial->insert(id2, initial->at(id1) * l1Xl2); - - NonlinearFactor::shared_ptr factor( - new BetweenFactor(id1, id2, l1Xl2, model)); - graph->push_back(factor); - } - // Parse measurements + size_t id1, id2; + is >> id1 >> id2; double bearing, range, bearing_std, range_std; - // A bearing-range measurement if (tag == "BR") { - is >> id1 >> id2 >> bearing >> range >> bearing_std >> range_std; + is >> bearing >> range >> bearing_std >> range_std; } - // A landmark measurement, TODO Frank says: don't know why is converted to bearing-range + // A landmark measurement, converted to bearing-range if (tag == "LANDMARK") { double lmx, lmy; double v1, v2, v3; - is >> id1 >> id2 >> lmx >> lmy >> v1 >> v2 >> v3; + is >> lmx >> lmy >> v1 >> v2 >> v3; // Convert x,y to bearing,range bearing = atan2(lmy, lmx); range = sqrt(lmx * lmx + lmy * lmy); - // In our experience, the x-y covariance on landmark sightings is not very good, so assume - // it describes the uncertainty at a range of 10m, and convert that to bearing/range uncertainty. 
+ // In our experience, the x-y covariance on landmark sightings is not + // very good, so assume it describes the uncertainty at a range of 10m, + // and convert that to bearing/range uncertainty. if (std::abs(v1 - v3) < 1e-4) { bearing_std = sqrt(v1 / 10.0); range_std = sqrt(v1); } else { + // TODO(frank): we are ignoring the non-uniform covariance bearing_std = 1; range_std = 1; - if (!haveLandmark) { - cout - << "Warning: load2D is a very simple dataset loader and is ignoring the\n" - "non-uniform covariance on LANDMARK measurements in this file." - << endl; - haveLandmark = true; - } } } - // Do some common stuff for bearing-range measurements - if (tag == "LANDMARK" || tag == "BR") { + // optional filter + if (maxIndex && id1 > maxIndex) + return boost::none; - // optional filter - if (maxID && id1 >= maxID) - continue; + // Create noise model + auto measurementNoise = noiseModel::Diagonal::Sigmas( + (Vector(2) << bearing_std, range_std).finished()); - // Create noise model - noiseModel::Diagonal::shared_ptr measurementNoise = - noiseModel::Diagonal::Sigmas((Vector(2) << bearing_std, range_std).finished()); + return BinaryMeasurement( + id1, L(id2), BearingRange2D(bearing, range), measurementNoise); + } +}; - // Add to graph - *graph += BearingRangeFactor(id1, L(id2), bearing, range, - measurementNoise); +/* ************************************************************************* */ +GraphAndValues load2D(const string &filename, SharedNoiseModel model, + size_t maxIndex, bool addNoise, bool smart, + NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + + // Single pass for poses and landmarks. + auto initial = boost::make_shared(); + Parser insert = [maxIndex, &initial](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose(is, tag)) { + if (!maxIndex || indexedPose->first <= maxIndex) + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexLandmark(is, tag)) { + if (!maxIndex || indexedLandmark->first <= maxIndex) + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } + return 0; + }; + parseLines(filename, insert); + + // Single pass for Pose2 and bearing-range factors. + auto graph = boost::make_shared(); + + // Instantiate factor parser + ParseFactor parseBetweenFactor( + {addNoise ? createSampler(model) : nullptr, maxIndex, smart, noiseFormat, + kernelFunctionType, model}); + + // Instantiate bearing-range parser + ParseMeasurement parseBearingRange{maxIndex}; + + // Combine in a single parser that adds factors to `graph`, but also inserts + // new variables into `initial` when needed. 
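  // A between factor initializes a missing pose by composing the previous pose
  // estimate with the measurement; a bearing-range measurement initializes a
  // missing landmark by rotating the local point (range, 0) by the bearing and
  // transforming it into the world frame with the connected pose.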
+ Parser parse = [&](istream &is, const string &tag) { + if (auto f = parseBetweenFactor(is, tag)) { + graph->push_back(*f); + + // Insert vertices if pure odometry file + Key key1 = (*f)->key1(), key2 = (*f)->key2(); + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) + initial->insert(key2, initial->at(key1) * (*f)->measured()); + } else if (auto m = parseBearingRange(is, tag)) { + Key key1 = m->key1(), key2 = m->key2(); + BearingRange2D br = m->measured(); + graph->emplace_shared>(key1, key2, br, + m->noiseModel()); // Insert poses or points if they do not exist yet - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(L(id2))) { - Pose2 pose = initial->at(id1); - Point2 local(cos(bearing) * range, sin(bearing) * range); + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) { + Pose2 pose = initial->at(key1); + Point2 local = br.bearing() * Point2(br.range(), 0); Point2 global = pose.transformFrom(local); - initial->insert(L(id2), global); + initial->insert(key2, global); } } - is.ignore(LINESIZE, '\n'); - } + return 0; + }; + + parseLines(filename, parse); return make_pair(graph, initial); } /* ************************************************************************* */ -GraphAndValues load2D_robust(const string& filename, - noiseModel::Base::shared_ptr& model, int maxID) { - return load2D(filename, model, maxID); +GraphAndValues load2D(pair dataset, size_t maxIndex, + bool addNoise, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + return load2D(dataset.first, dataset.second, maxIndex, addNoise, smart, + noiseFormat, kernelFunctionType); } /* ************************************************************************* */ -void save2D(const NonlinearFactorGraph& graph, const Values& config, - const noiseModel::Diagonal::shared_ptr model, const string& filename) { +GraphAndValues load2D_robust(const string &filename, + noiseModel::Base::shared_ptr &model, + size_t maxIndex) { + return load2D(filename, model, maxIndex); +} + +/* ************************************************************************* */ +void save2D(const NonlinearFactorGraph &graph, const Values &config, + const noiseModel::Diagonal::shared_ptr model, + const string &filename) { fstream stream(filename.c_str(), fstream::out); // save poses - - for(const Values::ConstKeyValuePair& key_value: config) { - const Pose2& pose = key_value.value.cast(); + for (const Values::ConstKeyValuePair key_value : config) { + const Pose2 &pose = key_value.value.cast(); stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() - << " " << pose.theta() << endl; + << " " << pose.theta() << endl; } // save edges - Matrix R = model->R(); - Matrix RR = trans(R) * R; //prod(trans(R),R); - for(boost::shared_ptr factor_: graph) { - boost::shared_ptr > factor = - boost::dynamic_pointer_cast >(factor_); + // TODO(frank): why don't we use model in factor? 
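  // RR = R^T * R below is the 3x3 information matrix of the supplied model,
  // written out in TORO order: inf_ff inf_fs inf_ss inf_rr inf_fr inf_sr.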
+ Matrix3 R = model->R(); + Matrix3 RR = R.transpose() * R; + for (auto f : graph) { + auto factor = boost::dynamic_pointer_cast>(f); if (!factor) continue; - Pose2 pose = factor->measured().inverse(); + const Pose2 pose = factor->measured().inverse(); stream << "EDGE2 " << factor->key2() << " " << factor->key1() << " " - << pose.x() << " " << pose.y() << " " << pose.theta() << " " << RR(0, 0) - << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) << " " - << RR(0, 2) << " " << RR(1, 2) << endl; + << pose.x() << " " << pose.y() << " " << pose.theta() << " " + << RR(0, 0) << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) + << " " << RR(0, 2) << " " << RR(1, 2) << endl; } stream.close(); } /* ************************************************************************* */ -GraphAndValues readG2o(const string& g2oFile, const bool is3D, +GraphAndValues readG2o(const string &g2oFile, const bool is3D, KernelFunctionType kernelFunctionType) { if (is3D) { return load3D(g2oFile); } else { // just call load2D - int maxID = 0; + size_t maxIndex = 0; bool addNoise = false; bool smart = true; - return load2D(g2oFile, SharedNoiseModel(), maxID, addNoise, smart, + return load2D(g2oFile, SharedNoiseModel(), maxIndex, addNoise, smart, NoiseFormatG2O, kernelFunctionType); } } /* ************************************************************************* */ -void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, - const string& filename) { +void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, + const string &filename) { fstream stream(filename.c_str(), fstream::out); + // Use a lambda here to more easily modify behavior in future. + auto index = [](gtsam::Key key) { return Symbol(key).index(); }; + // save 2D poses - for (const auto& key_value : estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose2& pose = p->value(); - stream << "VERTEX_SE2 " << key_value.key << " " << pose.x() << " " - << pose.y() << " " << pose.theta() << endl; + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose2 &pose = p->value(); + stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " " + << pose.y() << " " << pose.theta() << endl; } // save 3D poses - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose3& pose = p->value(); + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose3 &pose = p->value(); const Point3 t = pose.translation(); const auto q = pose.rotation().toQuaternion(); - stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " " - << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " - << q.z() << " " << q.w() << endl; + stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " " << t.x() << " " + << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " + << q.z() << " " << q.w() << endl; } // save 2D landmarks - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Point2& point = p->value(); - stream << "VERTEX_XY " << key_value.key << " " << point.x() << " " - << point.y() << endl; + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point2 &point = p->value(); + stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " " + << point.y() << endl; } // save 3D 
landmarks - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Point3& point = p->value(); - stream << "VERTEX_TRACKXYZ " << key_value.key << " " << point.x() << " " - << point.y() << " " << point.z() << endl; + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point3 &point = p->value(); + stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x() + << " " << point.y() << " " << point.z() << endl; } // save edges (2D or 3D) - for(const auto& factor_: graph) { - boost::shared_ptr > factor = - boost::dynamic_pointer_cast >(factor_); - if (factor){ + for (const auto &factor_ : graph) { + auto factor = boost::dynamic_pointer_cast>(factor_); + if (factor) { SharedNoiseModel model = factor->noiseModel(); - boost::shared_ptr gaussianModel = + auto gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); + Matrix3 Info = gaussianModel->R().transpose() * gaussianModel->R(); Pose2 pose = factor->measured(); //.inverse(); - stream << "EDGE_SE2 " << factor->key1() << " " << factor->key2() << " " - << pose.x() << " " << pose.y() << " " << pose.theta(); - for (int i = 0; i < 3; i++){ - for (int j = i; j < 3; j++){ + stream << "EDGE_SE2 " << index(factor->key1()) << " " + << index(factor->key2()) << " " << pose.x() << " " << pose.y() + << " " << pose.theta(); + for (size_t i = 0; i < 3; i++) { + for (size_t j = i; j < 3; j++) { stream << " " << Info(i, j); } } stream << endl; } - boost::shared_ptr< BetweenFactor > factor3D = - boost::dynamic_pointer_cast< BetweenFactor >(factor_); + auto factor3D = boost::dynamic_pointer_cast>(factor_); - if (factor3D){ + if (factor3D) { SharedNoiseModel model = factor3D->noiseModel(); boost::shared_ptr gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); + Matrix6 Info = gaussianModel->R().transpose() * gaussianModel->R(); const Pose3 pose3D = factor3D->measured(); const Point3 p = pose3D.translation(); const auto q = pose3D.rotation().toQuaternion(); - stream << "EDGE_SE3:QUAT " << factor3D->key1() << " " << factor3D->key2() - << " " << p.x() << " " << p.y() << " " << p.z() << " " << q.x() - << " " << q.y() << " " << q.z() << " " << q.w(); + stream << "EDGE_SE3:QUAT " << index(factor3D->key1()) << " " + << index(factor3D->key2()) << " " << p.x() << " " << p.y() << " " + << p.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " + << q.w(); - Matrix InfoG2o = I_6x6; - InfoG2o.block(0,0,3,3) = Info.block(3,3,3,3); // cov translation - InfoG2o.block(3,3,3,3) = Info.block(0,0,3,3); // cov rotation - InfoG2o.block(0,3,3,3) = Info.block(0,3,3,3); // off diagonal - InfoG2o.block(3,0,3,3) = Info.block(3,0,3,3); // off diagonal + Matrix6 InfoG2o = I_6x6; + InfoG2o.block<3, 3>(0, 0) = Info.block<3, 3>(3, 3); // cov translation + InfoG2o.block<3, 3>(3, 3) = Info.block<3, 3>(0, 0); // cov rotation + InfoG2o.block<3, 3>(0, 3) = Info.block<3, 3>(0, 3); // off diagonal + InfoG2o.block<3, 3>(3, 0) = Info.block<3, 3>(3, 0); // off diagonal - for (int i = 0; i < 6; i++){ - for (int j = i; j < 6; j++){ + for (size_t i = 0; i < 6; i++) { + for (size_t j = 
i; j < 6; j++) { stream << " " << InfoG2o(i, j); } } @@ -562,153 +736,215 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, } /* ************************************************************************* */ -static Rot3 NormalizedRot3(double w, double x, double y, double z) { +// parse quaternion in x,y,z,w order, and normalize to unit length +istream &operator>>(istream &is, Quaternion &q) { + double x, y, z, w; + is >> x >> y >> z >> w; const double norm = sqrt(w * w + x * x + y * y + z * z), f = 1.0 / norm; - return Rot3::Quaternion(f * w, f * x, f * y, f * z); + q = Quaternion(f * w, f * x, f * y, f * z); + return is; } /* ************************************************************************* */ -std::map parse3DPoses(const string& filename) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse3DPoses: can not find file " + filename); - - std::map poses; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "VERTEX3") { - Key id; - double x, y, z, roll, pitch, yaw; - ls >> id >> x >> y >> z >> roll >> pitch >> yaw; - poses.emplace(id, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z})); - } - if (tag == "VERTEX_SE3:QUAT") { - Key id; - double x, y, z, qx, qy, qz, qw; - ls >> id >> x >> y >> z >> qx >> qy >> qz >> qw; - poses.emplace(id, Pose3(NormalizedRot3(qw, qx, qy, qz), {x, y, z})); - } - } - return poses; +// parse Rot3 from roll, pitch, yaw +istream &operator>>(istream &is, Rot3 &R) { + double yaw, pitch, roll; + is >> roll >> pitch >> yaw; // notice order ! + R = Rot3::Ypr(yaw, pitch, roll); + return is; } /* ************************************************************************* */ -std::map parse3DLandmarks(const string& filename) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse3DLandmarks: can not find file " + filename); +boost::optional> parseVertexPose3(istream &is, + const string &tag) { + if (tag == "VERTEX3") { + size_t id; + double x, y, z; + Rot3 R; + is >> id >> x >> y >> z >> R; + return make_pair(id, Pose3(R, {x, y, z})); + } else if (tag == "VERTEX_SE3:QUAT") { + size_t id; + double x, y, z; + Quaternion q; + is >> id >> x >> y >> z >> q; + return make_pair(id, Pose3(q, {x, y, z})); + } else + return boost::none; +} - std::map landmarks; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "VERTEX_TRACKXYZ") { - Key id; - double x, y, z; - ls >> id >> x >> y >> z; - landmarks.emplace(id, Point3(x, y, z)); - } - } - return landmarks; +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose3, maxIndex); } /* ************************************************************************* */ -BetweenFactorPose3s parse3DFactors( - const string& filename, - const noiseModel::Diagonal::shared_ptr& corruptingNoise) { - ifstream is(filename.c_str()); - if (!is) throw invalid_argument("parse3DFactors: can not find file " + filename); +boost::optional> parseVertexPoint3(istream &is, + const string &tag) { + if (tag == "VERTEX_TRACKXYZ") { + size_t id; + double x, y, z; + is >> id >> x >> y >> z; + return make_pair(id, Point3(x, y, z)); + } else + return boost::none; +} - boost::optional sampler; - if (corruptingNoise) { - sampler = Sampler(corruptingNoise); - } +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return 
parseToMap(filename, parseVertexPoint3, maxIndex); +} - std::vector::shared_ptr> factors; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; +/* ************************************************************************* */ +// Parse a symmetric covariance matrix (onlyupper-triangular part is stored) +istream &operator>>(istream &is, Matrix6 &m) { + for (size_t i = 0; i < 6; i++) + for (size_t j = i; j < 6; j++) { + is >> m(i, j); + m(j, i) = m(i, j); + } + return is; +} +/* ************************************************************************* */ +// Pose3 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + size_t maxIndex; + + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + if (tag != "EDGE3" && tag != "EDGE_SE3:QUAT") + return boost::none; + + // parse ids and optionally filter + size_t id1, id2; + is >> id1 >> id2; + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) + return boost::none; + + Matrix6 m; if (tag == "EDGE3") { - Key id1, id2; - double x, y, z, roll, pitch, yaw; - ls >> id1 >> id2 >> x >> y >> z >> roll >> pitch >> yaw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) - for (size_t j = i; j < 6; j++) ls >> m(i, j); - SharedNoiseModel model = noiseModel::Gaussian::Information(m); - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z}), model)); - } - if (tag == "EDGE_SE3:QUAT") { - Key id1, id2; - double x, y, z, qx, qy, qz, qw; - ls >> id1 >> id2 >> x >> y >> z >> qx >> qy >> qz >> qw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) { - for (size_t j = i; j < 6; j++) { - double mij; - ls >> mij; - m(i, j) = mij; - m(j, i) = mij; - } - } - Matrix mgtsam(6, 6); + double x, y, z; + Rot3 R; + is >> x >> y >> z >> R >> m; + Pose3 T12(R, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); - mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation - mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation - mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal - mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal + return BinaryMeasurement(id1, id2, T12, + noiseModel::Gaussian::Information(m)); + } else if (tag == "EDGE_SE3:QUAT") { + double x, y, z; + Quaternion q; + is >> x >> y >> z >> q >> m; + Pose3 T12(q, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); + + // EDGE_SE3:QUAT stores covariance in t,R order, unlike GTSAM: + Matrix6 mgtsam; + mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation + mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation + mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal + mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); - auto R12 = NormalizedRot3(qw, qx, qy, qz); - if (sampler) { - R12 = R12.retract(sampler->sample()); - } - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(R12, {x, y, z}), model)); - } + return BinaryMeasurement( + id1, id2, T12, noiseModel::Gaussian::Information(mgtsam)); + } else + return boost::none; } - return factors; +}; + +/* ************************************************************************* */ +// Implementation of parseMeasurements for Pose3 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const 
noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? createSampler(model) : nullptr, + maxIndex}; + return parseToVector>(filename, parse); } /* ************************************************************************* */ -GraphAndValues load3D(const string& filename) { - const auto factors = parse3DFactors(filename); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); - for (const auto& factor : factors) { - graph->push_back(factor); - } +// Implementation of parseMeasurements for Rot3, which converts from Pose3 - Values::shared_ptr initial(new Values); +// Extract Rot3 measurement from Pose3 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose3 measurements " + "with Gaussian noise models."); + const Matrix6 M = gaussian->covariance(); + return BinaryMeasurement( + p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Gaussian::Covariance(M.block<3, 3>(0, 0), true)); +} - const auto poses = parse3DPoses(filename); - for (const auto& key_pose : poses) { - initial->insert(key_pose.first, key_pose.second); - } - const auto landmarks = parse3DLandmarks(filename); - for (const auto& key_landmark : landmarks) { - initial->insert(key_landmark.first, key_landmark.second); - } +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} + +/* ************************************************************************* */ +// Implementation of parseFactors for Pose3 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? createSampler(model) : nullptr, maxIndex}); + return parseToVector::shared_ptr>(filename, parse); +} + +/* ************************************************************************* */ +GraphAndValues load3D(const string &filename) { + auto graph = boost::make_shared(); + auto initial = boost::make_shared(); + + // Instantiate factor parser. maxIndex is always zero for load3D. + ParseFactor parseFactor({nullptr, 0}); + + // Single pass for variables and factors. Unlike 2D version, does *not* insert + // variables into `initial` if referenced but not present. 
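  // Recognized tags: VERTEX3 / VERTEX_SE3:QUAT (poses), VERTEX_TRACKXYZ
  // (landmarks, stored under L() keys), and EDGE3 / EDGE_SE3:QUAT (between
  // factors); all other tags are skipped.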
+ Parser parse = [&](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose3(is, tag)) { + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexPoint3(is, tag)) { + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } else if (auto factor = parseFactor(is, tag)) { + graph->push_back(*factor); + } + return 0; + }; + parseLines(filename, parse); return make_pair(graph, initial); } /* ************************************************************************* */ -Rot3 openGLFixedRotation() { // this is due to different convention for cameras in gtsam and openGL +Rot3 openGLFixedRotation() { // this is due to different convention for + // cameras in gtsam and openGL /* R = [ 1 0 0 * 0 -1 0 * 0 0 -1] @@ -721,7 +957,7 @@ Rot3 openGLFixedRotation() { // this is due to different convention for cameras } /* ************************************************************************* */ -Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { +Pose3 openGL2gtsam(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 wRc = (R.inverse()).compose(R90); @@ -730,7 +966,7 @@ Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { +Pose3 gtsam2openGL(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 cRw_openGL = R90.compose(R.inverse()); Point3 t_openGL = cRw_openGL.rotate(Point3(-tx, -ty, -tz)); @@ -738,13 +974,13 @@ Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Pose3& PoseGTSAM) { +Pose3 gtsam2openGL(const Pose3 &PoseGTSAM) { return gtsam2openGL(PoseGTSAM.rotation(), PoseGTSAM.x(), PoseGTSAM.y(), - PoseGTSAM.z()); + PoseGTSAM.z()); } /* ************************************************************************* */ -bool readBundler(const string& filename, SfmData &data) { +bool readBundler(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -779,7 +1015,7 @@ bool readBundler(const string& filename, SfmData &data) { // Check for all-zero R, in which case quit if (r11 == 0 && r12 == 0 && r13 == 0) { cout << "Error in readBundler: zero rotation matrix for pose " << i - << endl; + << endl; return false; } @@ -831,7 +1067,7 @@ bool readBundler(const string& filename, SfmData &data) { } /* ************************************************************************* */ -bool readBAL(const string& filename, SfmData &data) { +bool readBAL(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -879,7 +1115,7 @@ bool readBAL(const string& filename, SfmData &data) { // Get the 3D position float x, y, z; is >> x >> y >> z; - SfmTrack& track = data.tracks[j]; + SfmTrack &track = data.tracks[j]; track.p = Point3(x, y, z); track.r = 0.4f; track.g = 0.4f; @@ -891,7 +1127,7 @@ bool readBAL(const string& filename, SfmData &data) { } /* ************************************************************************* */ -bool writeBAL(const string& filename, SfmData &data) { +bool writeBAL(const string &filename, SfmData &data) { // Open the output file ofstream os; os.open(filename.c_str()); @@ -909,29 +1145,32 @@ bool writeBAL(const string& 
filename, SfmData &data) { // Write observations os << data.number_cameras() << " " << data.number_tracks() << " " - << nrObservations << endl; + << nrObservations << endl; os << endl; for (size_t j = 0; j < data.number_tracks(); j++) { // for each 3D point j - const SfmTrack& track = data.tracks[j]; + const SfmTrack &track = data.tracks[j]; - for (size_t k = 0; k < track.number_measurements(); k++) { // for each observation of the 3D point j + for (size_t k = 0; k < track.number_measurements(); + k++) { // for each observation of the 3D point j size_t i = track.measurements[k].first; // camera id double u0 = data.cameras[i].calibration().u0(); double v0 = data.cameras[i].calibration().v0(); if (u0 != 0 || v0 != 0) { - cout - << "writeBAL has not been tested for calibration with nonzero (u0,v0)" - << endl; + cout << "writeBAL has not been tested for calibration with nonzero " + "(u0,v0)" + << endl; } - double pixelBALx = track.measurements[k].second.x() - u0; // center of image is the origin - double pixelBALy = -(track.measurements[k].second.y() - v0); // center of image is the origin + double pixelBALx = track.measurements[k].second.x() - + u0; // center of image is the origin + double pixelBALy = -(track.measurements[k].second.y() - + v0); // center of image is the origin Point2 pixelMeasurement(pixelBALx, pixelBALy); - os << i /*camera id*/<< " " << j /*point id*/<< " " - << pixelMeasurement.x() /*u of the pixel*/<< " " - << pixelMeasurement.y() /*v of the pixel*/<< endl; + os << i /*camera id*/ << " " << j /*point id*/ << " " + << pixelMeasurement.x() /*u of the pixel*/ << " " + << pixelMeasurement.y() /*v of the pixel*/ << endl; } } os << endl; @@ -964,46 +1203,47 @@ bool writeBAL(const string& filename, SfmData &data) { return true; } -bool writeBALfromValues(const string& filename, const SfmData &data, - Values& values) { +bool writeBALfromValues(const string &filename, const SfmData &data, + Values &values) { using Camera = PinholeCamera; SfmData dataValues = data; // Store poses or cameras in SfmData size_t nrPoses = values.count(); - if (nrPoses == dataValues.number_cameras()) { // we only estimated camera poses - for (size_t i = 0; i < dataValues.number_cameras(); i++) { // for each camera - Key poseKey = symbol('x', i); - Pose3 pose = values.at(poseKey); + if (nrPoses == + dataValues.number_cameras()) { // we only estimated camera poses + for (size_t i = 0; i < dataValues.number_cameras(); + i++) { // for each camera + Pose3 pose = values.at(X(i)); Cal3Bundler K = dataValues.cameras[i].calibration(); Camera camera(pose, K); dataValues.cameras[i] = camera; } } else { size_t nrCameras = values.count(); - if (nrCameras == dataValues.number_cameras()) { // we only estimated camera poses and calibration - for (size_t i = 0; i < nrCameras; i++) { // for each camera - Key cameraKey = i; // symbol('c',i); + if (nrCameras == dataValues.number_cameras()) { // we only estimated camera + // poses and calibration + for (size_t i = 0; i < nrCameras; i++) { // for each camera + Key cameraKey = i; // symbol('c',i); Camera camera = values.at(cameraKey); dataValues.cameras[i] = camera; } } else { - cout - << "writeBALfromValues: different number of cameras in SfM_dataValues (#cameras " - << dataValues.number_cameras() << ") and values (#cameras " - << nrPoses << ", #poses " << nrCameras << ")!!" 
- << endl; + cout << "writeBALfromValues: different number of cameras in " + "SfM_dataValues (#cameras " + << dataValues.number_cameras() << ") and values (#cameras " + << nrPoses << ", #poses " << nrCameras << ")!!" << endl; return false; } } // Store 3D points in SfmData - size_t nrPoints = values.count(), nrTracks = dataValues.number_tracks(); + size_t nrPoints = values.count(), + nrTracks = dataValues.number_tracks(); if (nrPoints != nrTracks) { - cout - << "writeBALfromValues: different number of points in SfM_dataValues (#points= " - << nrTracks << ") and values (#points " - << nrPoints << ")!!" << endl; + cout << "writeBALfromValues: different number of points in " + "SfM_dataValues (#points= " + << nrTracks << ") and values (#points " << nrPoints << ")!!" << endl; } for (size_t j = 0; j < nrTracks; j++) { // for each point @@ -1015,7 +1255,7 @@ bool writeBALfromValues(const string& filename, const SfmData &data, dataValues.tracks[j].r = 1.0; dataValues.tracks[j].g = 0.0; dataValues.tracks[j].b = 0.0; - dataValues.tracks[j].p = Point3(0,0,0); + dataValues.tracks[j].p = Point3(0, 0, 0); } } @@ -1023,22 +1263,40 @@ bool writeBALfromValues(const string& filename, const SfmData &data, return writeBAL(filename, dataValues); } -Values initialCamerasEstimate(const SfmData& db) { +Values initialCamerasEstimate(const SfmData &db) { Values initial; size_t i = 0; // NO POINTS: j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert(i++, camera); return initial; } -Values initialCamerasAndPointsEstimate(const SfmData& db) { +Values initialCamerasAndPointsEstimate(const SfmData &db) { Values initial; size_t i = 0, j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert((i++), camera); - for(const SfmTrack& track: db.tracks) + for (const SfmTrack &track : db.tracks) initial.insert(P(j++), track.p); return initial; } -} // \namespace gtsam +// To be deprecated when pybind wrapper is merged: +BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, size_t maxIndex) { + return parseFactors(filename, model, maxIndex); +} + +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +std::map parse3DPoses(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); +} + +std::map parse3DLandmarks(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); +} +#endif +} // namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 7029a7a9c..53abe55ba 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -20,13 +20,12 @@ #pragma once +#include #include #include #include -#include -#include +#include #include -#include #include #include #include @@ -74,20 +73,42 @@ enum KernelFunctionType { KernelFunctionTypeNONE, KernelFunctionTypeHUBER, KernelFunctionTypeTUKEY }; -/// Return type for auxiliary functions -typedef std::pair IndexedPose; -typedef std::pair IndexedLandmark; -typedef std::pair, Pose2> IndexedEdge; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /** - * Parse TORO/G2O vertex "id x y yaw" - * @param is input stream - * @param tag string parsed from input stream, will only parse if vertex type + * Parse variables in a line-based text format (like g2o) into a map. + * Instantiated in .cpp Pose2, Point2, Pose3, and Point3. + * Note the map keys are integer indices, *not* gtsam::Keys. This is is + * different below where landmarks will use L(index) symbols. 
*/ -GTSAM_EXPORT boost::optional parseVertex(std::istream& is, - const std::string& tag); -#endif +template +GTSAM_EXPORT std::map parseVariables(const std::string &filename, + size_t maxIndex = 0); + +/** + * Parse binary measurements in a line-based text format (like g2o) into a + * vector. Instantiated in .cpp for Pose2, Rot2, Pose3, and Rot3. The rotation + * versions parse poses and extract only the rotation part, using the marginal + * covariance as noise model. + */ +template +GTSAM_EXPORT std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +/** + * Parse BetweenFactors in a line-based text format (like g2o) into a vector of + * shared pointers. Instantiated in .cpp T equal to Pose2 and Pose3. + */ +template +GTSAM_EXPORT std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +/// Return type for auxiliary functions +typedef std::pair IndexedPose; +typedef std::pair IndexedLandmark; +typedef std::pair, Pose2> IndexedEdge; /** * Parse TORO/G2O vertex "id x y yaw" @@ -102,7 +123,6 @@ GTSAM_EXPORT boost::optional parseVertexPose(std::istream& is, * @param is input stream * @param tag string parsed from input stream, will only parse if vertex type */ - GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& is, const std::string& tag); @@ -114,18 +134,21 @@ GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& GTSAM_EXPORT boost::optional parseEdge(std::istream& is, const std::string& tag); -/// Return type for load functions -typedef std::pair GraphAndValues; +/// Return type for load functions, which return a graph and initial values. For +/// landmarks, the gtsam::Symbol L(index) is used to insert into the Values. +/// Bearing-range measurements also refer to landmarks with L(index). 
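/// Minimal usage sketch (hypothetical file name), assuming the loader
/// declarations below; both members are shared pointers:
///   GraphAndValues gv = load2D("example.g2o");
///   gv.first->print("graph");    // NonlinearFactorGraph
///   gv.second->print("initial"); // Values; landmarks are keyed as L(index)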
+using GraphAndValues = + std::pair; /** * Load TORO 2D Graph * @param dataset/model pair as constructed by [dataset] - * @param maxID if non-zero cut out vertices >= maxID + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model */ GTSAM_EXPORT GraphAndValues load2D( - std::pair dataset, int maxID = 0, + std::pair dataset, size_t maxIndex = 0, bool addNoise = false, bool smart = true, // NoiseFormat noiseFormat = NoiseFormatAUTO, @@ -135,7 +158,7 @@ GTSAM_EXPORT GraphAndValues load2D( * Load TORO/G2O style graph files * @param filename * @param model optional noise model to use instead of one specified by file - * @param maxID if non-zero cut out vertices >= maxID + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model * @param noiseFormat how noise parameters are stored @@ -143,13 +166,13 @@ GTSAM_EXPORT GraphAndValues load2D( * @return graph and initial values */ GTSAM_EXPORT GraphAndValues load2D(const std::string& filename, - SharedNoiseModel model = SharedNoiseModel(), Key maxID = 0, bool addNoise = + SharedNoiseModel model = SharedNoiseModel(), size_t maxIndex = 0, bool addNoise = false, bool smart = true, NoiseFormat noiseFormat = NoiseFormatAUTO, // KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE); /// @deprecated load2D now allows for arbitrary models and wrapping a robust kernel GTSAM_EXPORT GraphAndValues load2D_robust(const std::string& filename, - noiseModel::Base::shared_ptr& model, int maxID = 0); + noiseModel::Base::shared_ptr& model, size_t maxIndex = 0); /** save 2d graph */ GTSAM_EXPORT void save2D(const NonlinearFactorGraph& graph, @@ -173,21 +196,15 @@ GTSAM_EXPORT GraphAndValues readG2o(const std::string& g2oFile, const bool is3D * @param filename The name of the g2o file to write * @param graph NonlinearFactor graph storing the measurements * @param estimate Values + * + * Note:behavior change in PR #471: to be consistent with load2D and load3D, we + * write the *indices* to file and not the full Keys. This change really only + * affects landmarks, which get read as indices but stored in values with the + * symbol L(index). */ GTSAM_EXPORT void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, const std::string& filename); -/// Parse edges in 3D TORO graph file into a set of BetweenFactors. -using BetweenFactorPose3s = std::vector::shared_ptr>; -GTSAM_EXPORT BetweenFactorPose3s parse3DFactors(const std::string& filename, - const noiseModel::Diagonal::shared_ptr& corruptingNoise=nullptr); - -/// Parse vertices in 3D TORO/g2o graph file into a map of Pose3s. -GTSAM_EXPORT std::map parse3DPoses(const std::string& filename); - -/// Parse landmarks in 3D g2o graph file into a map of Point3s. 
-GTSAM_EXPORT std::map parse3DLandmarks(const string& filename); - /// Load TORO 3D Graph GTSAM_EXPORT GraphAndValues load3D(const std::string& filename); @@ -324,13 +341,24 @@ GTSAM_EXPORT Values initialCamerasEstimate(const SfmData& db); */ GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfmData& db); -/// Aliases for backwards compatibility -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -typedef SfmMeasurement SfM_Measurement; -typedef SiftIndex SIFT_Index; -typedef SfmTrack SfM_Track; -typedef SfmCamera SfM_Camera; -typedef SfmData SfM_data; -#endif +// To be deprecated when pybind wrapper is merged: +using BetweenFactorPose3s = std::vector::shared_ptr>; +GTSAM_EXPORT BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); -} // namespace gtsam +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +inline boost::optional parseVertex(std::istream &is, + const std::string &tag) { + return parseVertexPose(is, tag); +} + +GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, + size_t maxIndex = 0); + +GTSAM_EXPORT std::map +parse3DLandmarks(const std::string &filename, size_t maxIndex = 0); + +#endif +} // namespace gtsam diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index b6b1776d2..aad9124c5 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -82,38 +82,73 @@ TEST( dataSet, parseEdge) const auto actual = parseEdge(is, tag); EXPECT(actual); if (actual) { - pair expected(0, 1); + pair expected(0, 1); EXPECT(expected == actual->first); EXPECT(assert_equal(Pose2(2, 3, 4), actual->second)); } } /* ************************************************************************* */ -TEST( dataSet, load2D) -{ +TEST(dataSet, load2D) { ///< The structure where we will save the SfM data const string filename = findExampleDataFile("w100.graph"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(300,graph->size()); - EXPECT_LONGS_EQUAL(100,initial->size()); - noiseModel::Unit::shared_ptr model = noiseModel::Unit::Create(3); - BetweenFactor expected(1, 0, Pose2(-0.99879,0.0417574,-0.00818381), model); - BetweenFactor::shared_ptr actual = boost::dynamic_pointer_cast< - BetweenFactor >(graph->at(0)); + EXPECT_LONGS_EQUAL(300, graph->size()); + EXPECT_LONGS_EQUAL(100, initial->size()); + auto model = noiseModel::Unit::Create(3); + BetweenFactor expected(1, 0, Pose2(-0.99879, 0.0417574, -0.00818381), + model); + BetweenFactor::shared_ptr actual = + boost::dynamic_pointer_cast>(graph->at(0)); EXPECT(assert_equal(expected, *actual)); + + // Check binary measurements, Pose2 + size_t maxIndex = 5; + auto measurements = parseMeasurements(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, measurements.size()); + + // Check binary measurements, Rot2 + auto measurements2 = parseMeasurements(filename); + EXPECT_LONGS_EQUAL(300, measurements2.size()); + + // // Check factor parsing + const auto actualFactors = parseFactors(filename); + for (size_t i : {0, 1, 2, 3, 4, 5}) { + EXPECT(assert_equal( + *boost::dynamic_pointer_cast>(graph->at(i)), + *actualFactors[i], 1e-5)); + } + + // Check pose parsing + const auto actualPoses = parseVariables(filename); + for (size_t j : {0, 1, 2, 3, 4}) { + EXPECT(assert_equal(initial->at(j), actualPoses.at(j), 1e-5)); + } + + // Check landmark parsing + const auto actualLandmarks = parseVariables(filename); + EXPECT_LONGS_EQUAL(0, 
actualLandmarks.size()); } /* ************************************************************************* */ -TEST( dataSet, load2DVictoriaPark) -{ +TEST(dataSet, load2DVictoriaPark) { const string filename = findExampleDataFile("victoria_park.txt"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; + + // Load all boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(10608,graph->size()); - EXPECT_LONGS_EQUAL(7120,initial->size()); + EXPECT_LONGS_EQUAL(10608, graph->size()); + EXPECT_LONGS_EQUAL(7120, initial->size()); + + // Restrict keys + size_t maxIndex = 5; + boost::tie(graph, initial) = load2D(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, graph->size()); + EXPECT_LONGS_EQUAL(6, initial->size()); // file has 0 as well + EXPECT_LONGS_EQUAL(L(5), graph->at(4)->keys()[1]); } /* ************************************************************************* */ @@ -184,7 +219,7 @@ TEST(dataSet, readG2o3D) { } // Check factor parsing - const auto actualFactors = parse3DFactors(g2oFile); + const auto actualFactors = parseFactors(g2oFile); for (size_t i : {0, 1, 2, 3, 4, 5}) { EXPECT(assert_equal( *boost::dynamic_pointer_cast>(expectedGraph[i]), @@ -192,13 +227,13 @@ TEST(dataSet, readG2o3D) { } // Check pose parsing - const auto actualPoses = parse3DPoses(g2oFile); + const auto actualPoses = parseVariables(g2oFile); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualPoses.at(j), 1e-5)); } // Check landmark parsing - const auto actualLandmarks = parse3DLandmarks(g2oFile); + const auto actualLandmarks = parseVariables(g2oFile); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualPoses.at(j), 1e-5)); } @@ -259,7 +294,7 @@ TEST(dataSet, readG2oCheckDeterminants) { const string g2oFile = findExampleDataFile("toyExample.g2o"); // Check determinants in factors - auto factors = parse3DFactors(g2oFile); + auto factors = parseFactors(g2oFile); EXPECT_LONGS_EQUAL(6, factors.size()); for (const auto& factor : factors) { const Rot3 R = factor->measured().rotation(); @@ -267,13 +302,13 @@ TEST(dataSet, readG2oCheckDeterminants) { } // Check determinants in initial values - const map poses = parse3DPoses(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(5, poses.size()); for (const auto& key_value : poses) { const Rot3 R = key_value.second.rotation(); EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); } - const map landmarks = parse3DLandmarks(g2oFile); + const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(0, landmarks.size()); } @@ -282,10 +317,13 @@ TEST(dataSet, readG2oLandmarks) { const string g2oFile = findExampleDataFile("example_with_vertices.g2o"); // Check number of poses and landmarks. Should be 8 each. 
- const map poses = parse3DPoses(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, poses.size()); - const map landmarks = parse3DLandmarks(g2oFile); + const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, landmarks.size()); + + auto graphAndValues = load3D(g2oFile); + EXPECT(graphAndValues.second->exists(L(0))); } /* ************************************************************************* */ @@ -538,14 +576,12 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ Values value; for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera - Key poseKey = symbol('x',i); Pose3 pose = poseChange.compose(readData.cameras[i].pose()); - value.insert(poseKey, pose); + value.insert(X(i), pose); } for(size_t j=0; j < readData.number_tracks(); j++){ // for each point - Key pointKey = P(j); Point3 point = poseChange.transformFrom( readData.tracks[j].p ); - value.insert(pointKey, point); + value.insert(P(j), point); } // Write values and readData to a file @@ -570,13 +606,11 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ EXPECT(assert_equal(expected,actual,12)); Pose3 expectedPose = camera0.pose(); - Key poseKey = symbol('x',0); - Pose3 actualPose = value.at(poseKey); + Pose3 actualPose = value.at(X(0)); EXPECT(assert_equal(expectedPose,actualPose, 1e-7)); Point3 expectedPoint = track0.p; - Key pointKey = P(0); - Point3 actualPoint = value.at(pointKey); + Point3 actualPoint = value.at(P(0)); EXPECT(assert_equal(expectedPoint,actualPoint, 1e-6)); } diff --git a/gtsam/slam/tests/testFrobeniusFactor.cpp b/gtsam/slam/tests/testFrobeniusFactor.cpp index 9cb0c19fa..321b54c86 100644 --- a/gtsam/slam/tests/testFrobeniusFactor.cpp +++ b/gtsam/slam/tests/testFrobeniusFactor.cpp @@ -188,54 +188,6 @@ TEST(FrobeniusBetweenFactorSO4, evaluateError) { EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } -//****************************************************************************** -namespace submanifold { -SO4 id; -Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); -SO3 R1 = SO3::Expmap(v1.tail<3>()); -SO4 Q1 = SO4::Expmap(v1); -Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); -SO3 R2 = SO3::Expmap(v2.tail<3>()); -SO4 Q2 = SO4::Expmap(v2); -SO3 R12 = R1.between(R2); -} // namespace submanifold - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, evaluateError) { - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // dimension = 6 not 16 - for (const size_t p : {5, 4, 3}) { - Matrix M = Matrix::Identity(p, p); - M.topLeftCorner(3, 3) = submanifold::R1.matrix(); - SOn Q1(M); - M.topLeftCorner(3, 3) = submanifold::R2.matrix(); - SOn Q2(M); - auto factor = - FrobeniusWormholeFactor(1, 2, Rot3(::so3::R12.matrix()), p, model); - Matrix H1, H2; - factor.evaluateError(Q1, Q2, H1, H2); - - // Test derivatives - Values values; - values.insert(1, Q1); - values.insert(2, Q2); - EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); - } -} - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, equivalenceToSO3) { - using namespace ::submanifold; - auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension - auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); - auto factor4 = FrobeniusWormholeFactor(1, 2, Rot3(R12.matrix()), 4, model); - const Matrix3 E3(factor3.evaluateError(R1, R2).data()); - const Matrix43 E4( - 
factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); - EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); - EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); -} - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/smart/CMakeLists.txt b/gtsam/smart/CMakeLists.txt deleted file mode 100644 index 53c18fe96..000000000 --- a/gtsam/smart/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -# Install headers -file(GLOB smart_headers "*.h") -install(FILES ${smart_headers} DESTINATION include/gtsam/smart) - -# Build tests -add_subdirectory(tests) diff --git a/gtsam/smart/tests/CMakeLists.txt b/gtsam/smart/tests/CMakeLists.txt deleted file mode 100644 index caa3164fa..000000000 --- a/gtsam/smart/tests/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -gtsamAddTestsGlob(smart "test*.cpp" "" "gtsam") diff --git a/gtsam/symbolic/SymbolicBayesTree.h b/gtsam/symbolic/SymbolicBayesTree.h index e28f28764..3fdf1011b 100644 --- a/gtsam/symbolic/SymbolicBayesTree.h +++ b/gtsam/symbolic/SymbolicBayesTree.h @@ -39,6 +39,7 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; typedef boost::weak_ptr weak_ptr; SymbolicBayesTreeClique() {} + virtual ~SymbolicBayesTreeClique() {} SymbolicBayesTreeClique(const boost::shared_ptr& conditional) : Base(conditional) {} }; diff --git a/gtsam_unstable/discrete/tests/testPlanning.cpp b/gtsam_unstable/discrete/tests/testPlanning.cpp deleted file mode 100644 index 431d7a4ef..000000000 --- a/gtsam_unstable/discrete/tests/testPlanning.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * testPlanning.cpp - * @brief develop code for planning example - * @date Feb 13, 2012 - * @author Frank Dellaert - */ - -//#include -#include -#include -#include -#include - -using namespace std; -using namespace gtsam; - -/* ************************************************************************* */ -TEST_UNSAFE(Planner, allInOne) -{ - // A small planning problem - // First variable is location - DiscreteKey location(0,3), - haveRock(1,2), haveSoil(2,2), haveImage(3,2), - commRock(4,2), commSoil(5,2), commImage(6,2); - - // There are 3 actions: - // Drive, communicate, sample -} - - -/* ************************************************************************* */ -int main() { - TestResult tr; - return TestRegistry::runAllTests(tr); -} -/* ************************************************************************* */ - diff --git a/gtsam_unstable/discrete/tests/testScheduler.cpp b/gtsam_unstable/discrete/tests/testScheduler.cpp index 0be4e4b1f..3f6c6a1e0 100644 --- a/gtsam_unstable/discrete/tests/testScheduler.cpp +++ b/gtsam_unstable/discrete/tests/testScheduler.cpp @@ -75,7 +75,7 @@ DiscreteFactorGraph createExpected() { // Mutual exclusion for students expected.addAllDiff(A, J); - return expected; + return std::move(expected); } /* ************************************************************************* */ diff --git a/gtsam_unstable/geometry/tests/testSimilarity3.cpp b/gtsam_unstable/geometry/tests/testSimilarity3.cpp index b07b5acd6..d23346896 100644 --- a/gtsam_unstable/geometry/tests/testSimilarity3.cpp +++ b/gtsam_unstable/geometry/tests/testSimilarity3.cpp @@ -244,7 +244,7 @@ TEST(Similarity3, GroupAction) { &Similarity3::transformFrom, _1, _2, boost::none, boost::none); Point3 q(1, 2, 3); - for (const auto T : { T1, T2, T3, T4, T5, T6 }) { + for (const auto& T : { T1, T2, T3, T4, T5, T6 }) { Point3 q(1, 0, 0); Matrix H1 = numericalDerivative21(f, T, q); Matrix H2 = 
numericalDerivative22(f, T, q); diff --git a/gtsam_unstable/gtsam_unstable.h b/gtsam_unstable/gtsam_unstable.h index ef2d16bf0..05b30bb0b 100644 --- a/gtsam_unstable/gtsam_unstable.h +++ b/gtsam_unstable/gtsam_unstable.h @@ -12,10 +12,14 @@ class gtsam::Point2Vector; class gtsam::Rot2; class gtsam::Pose2; class gtsam::Point3; +class gtsam::SO3; +class gtsam::SO4; +class gtsam::SOn; class gtsam::Rot3; class gtsam::Pose3; virtual class gtsam::noiseModel::Base; virtual class gtsam::noiseModel::Gaussian; +virtual class gtsam::noiseModel::Isotropic; virtual class gtsam::imuBias::ConstantBias; virtual class gtsam::NonlinearFactor; virtual class gtsam::NoiseModelFactor; @@ -39,6 +43,7 @@ class gtsam::KeyVector; class gtsam::LevenbergMarquardtParams; class gtsam::ISAM2Params; class gtsam::GaussianDensity; +class gtsam::LevenbergMarquardtOptimizer; namespace gtsam { @@ -282,7 +287,6 @@ virtual class PriorFactor : gtsam::NoiseModelFactor { void serializable() const; // enabling serialization functionality }; - #include template virtual class BetweenFactor : gtsam::NoiseModelFactor { diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h index 2291642aa..b4645e4ed 100644 --- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h +++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h @@ -117,15 +117,6 @@ public: const NonlinearFactorGraph& graph, const Values& theta, const KeyVector& keys, const GaussianFactorGraph::Eliminate& eliminateFunction = EliminatePreferCholesky); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - static NonlinearFactorGraph calculateMarginalFactors( - const NonlinearFactorGraph& graph, const Values& theta, const std::set& keys, - const GaussianFactorGraph::Eliminate& eliminateFunction = EliminatePreferCholesky) { - KeyVector keyVector(keys.begin(), keys.end()); - return CalculateMarginalFactors(graph, theta, keyVector, eliminateFunction); - } -#endif - protected: /** A typedef defining an Key-Factor mapping **/ diff --git a/gtsam_unstable/slam/BetweenFactorEM.h b/gtsam_unstable/slam/BetweenFactorEM.h index 4104ba653..d551209c9 100644 --- a/gtsam_unstable/slam/BetweenFactorEM.h +++ b/gtsam_unstable/slam/BetweenFactorEM.h @@ -403,11 +403,6 @@ public: return measured_; } - /** number of variables attached to this factor */ - std::size_t size() const { - return 2; - } - size_t dim() const override { return model_inlier_->R().rows() + model_inlier_->R().cols(); } diff --git a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h index 35080bd42..a17e07f9c 100644 --- a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h +++ b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h @@ -203,11 +203,6 @@ namespace gtsam { /* ************************************************************************* */ - /** number of variables attached to this factor */ - std::size_t size() const { - return 1; - } - size_t dim() const override { return model_->R().rows() + model_->R().cols(); } diff --git a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h index 2db2844ae..8a56a5d02 100644 --- a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h +++ b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h @@ -401,11 +401,6 @@ namespace gtsam { /* ************************************************************************* */ - /** number of variables attached to this factor */ - std::size_t size() const { - return 1; - } - size_t dim() const override { 
return model_inlier_->R().rows() + model_inlier_->R().cols(); } diff --git a/gtsam_unstable/timing/process_shonan_timing_results.py b/gtsam_unstable/timing/process_shonan_timing_results.py new file mode 100644 index 000000000..9cf934dba --- /dev/null +++ b/gtsam_unstable/timing/process_shonan_timing_results.py @@ -0,0 +1,215 @@ +""" +Process timing results from timeShonanAveraging +""" + +import xlrd +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import FuncFormatter +import heapq +from collections import Counter + +def make_combined_plot(name, p_values, times, costs, min_cost_range=10): + """ Make a plot that combines timing and SO(3) cost. + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + Will calculate the range of the costs, default minimum range = 10.0 + """ + min_cost = min(costs) + cost_range = max(max(costs)-min_cost,min_cost_range) + fig = plt.figure() + ax1 = fig.add_subplot(111) + ax1.plot(p_values, times, label="time") + ax1.set_ylabel('Time used to optimize \ seconds') + ax1.set_xlabel('p_value') + ax2 = ax1.twinx() + ax2.plot(p_values, costs, 'r', label="cost") + ax2.set_ylabel('Cost at SO(3) form') + ax2.set_xlabel('p_value') + ax2.set_xticks(p_values) + ax2.set_ylim(min_cost, min_cost + cost_range) + plt.title(name, fontsize=12) + ax1.legend(loc="upper left") + ax2.legend(loc="upper right") + plt.interactive(False) + plt.show() + +def make_convergence_plot(name, p_values, times, costs, iter=10): + """ Make a bar that show the success rate for each p_value according to whether the SO(3) cost converges + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + iter: int of iteration number for each p_value + """ + + max_cost = np.mean(np.array(heapq.nlargest(iter, costs))) + # calculate mean costs for each p value + p_values = list(dict(Counter(p_values)).keys()) + # make sure the iter number + iter = int(len(times)/len(p_values)) + p_mean_cost = [np.mean(np.array(costs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_max = p_values[p_mean_cost.index(max(p_mean_cost))] + # print(p_mean_cost) + # print(p_max) + + #take mean and make the combined plot + mean_times = [] + mean_costs = [] + for p in p_values: + costs_tmp = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_cost = sum(costs_tmp)/len(costs_tmp) + mean_costs.append(mean_cost) + times_tmp = times[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_time = sum(times_tmp)/len(times_tmp) + mean_times.append(mean_time) + make_combined_plot(name, p_values,mean_times, mean_costs) + + # calculate the convergence rate for each p_value + p_success_rates = [] + if p_mean_cost[0] >= 0.95*np.mean(np.array(costs)) and p_mean_cost[0] <= 1.05*np.mean(np.array(costs)): + p_success_rates = [ 1.0 for p in p_values] + else: + for p in p_values: + if p > p_max: + p_costs = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + # print(p_costs) + converged = [ int(p_cost < 0.3*max_cost) for p_cost in p_costs] + success_rate = sum(converged)/len(converged) + p_success_rates.append(success_rate) + else: + p_success_rates.append(0) + + plt.bar(p_values, p_success_rates, align='center', alpha=0.5) + plt.xticks(p_values) + plt.yticks(np.arange(0, 1.2, 0.2), ['0%', '20%', '40%', '60%', '80%', '100%']) + plt.xlabel("p_value") + plt.ylabel("success rate") + plt.title(name, fontsize=12) + plt.show() + 
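# Illustrative sketch only (not executed by this script): the helpers above
# expect parallel lists with one entry per timing run, e.g. two runs per p
# value read from the tab-separated CSV parsed below. Numbers are made up.
#   p_values = [3, 3, 4, 4, 5, 5]
#   times    = [0.8, 0.9, 1.7, 1.6, 3.2, 3.1]          # seconds
#   costs    = [120.0, 119.5, 60.2, 60.4, 60.1, 60.0]  # SO(3) costs
#   make_convergence_plot("toy example", p_values, times, costs, iter=2)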
+def make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds): + """ Make a plot that combines time for optimizing, time for optimizing and compute min_eigen, + min_eigen, subound (subound = (f_R - f_SDP) / f_SDP). + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times1: list of timings (seconds) + costPs: f_SDP + cost3s: f_R + times2: list of timings (seconds) + min_eigens: list of min_eigen (double) + subounds: list of subound (double) + """ + + if dict(Counter(p_values))[5] != 1: + p_values = list(dict(Counter(p_values)).keys()) + iter = int(len(times1)/len(p_values)) + p_mean_times1 = [np.mean(np.array(times1[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_times2 = [np.mean(np.array(times2[i*iter:(i+1)*iter])) for i in range(len(p_values))] + print("p_values \n", p_values) + print("p_mean_times_opti \n", p_mean_times1) + print("p_mean_times_eig \n", p_mean_times2) + + p_mean_costPs = [np.mean(np.array(costPs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_cost3s = [np.mean(np.array(cost3s[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_lambdas = [np.mean(np.array(min_eigens[i*iter:(i+1)*iter])) for i in range(len(p_values))] + + print("p_mean_costPs \n", p_mean_costPs) + print("p_mean_cost3s \n", p_mean_cost3s) + print("p_mean_lambdas \n", p_mean_lambdas) + print("*******************************************************************************************************************") + + + else: + plt.figure(1) + plt.ylabel('Time used (seconds)') + plt.xlabel('p_value') + plt.plot(p_values, times1, 'r', label="time for optimizing") + plt.plot(p_values, times2, 'blue', label="time for optimizing and check") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(2) + plt.ylabel('Min eigen_value') + plt.xlabel('p_value') + plt.plot(p_values, min_eigens, 'r', label="min_eigen values") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(3) + plt.ylabel('sub_bounds') + plt.xlabel('p_value') + plt.plot(p_values, subounds, 'blue', label="sub_bounds") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.show() + +# Process arguments and call plot function +import argparse +import csv +import os + +parser = argparse.ArgumentParser() +parser.add_argument("path") +args = parser.parse_args() + + +file_path = [] +domain = os.path.abspath(args.path) +for info in os.listdir(args.path): + file_path.append(os.path.join(domain, info)) +file_path.sort() +print(file_path) + + +# name of all the plots +names = {} +names[0] = 'tinyGrid3D vertex = 9, edge = 11' +names[1] = 'smallGrid3D vertex = 125, edge = 297' +names[2] = 'parking-garage vertex = 1661, edge = 6275' +names[3] = 'sphere2500 vertex = 2500, edge = 4949' +# names[4] = 'sphere_bignoise vertex = 2200, edge = 8647' +names[5] = 'torus3D vertex = 5000, edge = 9048' +names[6] = 'cubicle vertex = 5750, edge = 16869' +names[7] = 'rim vertex = 10195, edge = 29743' + +# Parse CSV file +for key, name in names.items(): + print(key, name) + + # find according file to process + name_file = None + for path in file_path: + if name[0:3] in path: + name_file = path + if name_file == None: + print("The file %s is not in the path" % name) + continue + + p_values, times1, costPs, cost3s, times2, min_eigens, subounds = [],[],[],[],[],[],[] + with open(name_file) as csvfile: + reader = csv.reader(csvfile, delimiter='\t') + for row in reader: + 
print(row) + p_values.append(int(row[0])) + times1.append(float(row[1])) + costPs.append(float(row[2])) + cost3s.append(float(row[3])) + if len(row) > 4: + times2.append(float(row[4])) + min_eigens.append(float(row[5])) + subounds.append(float(row[6])) + + #plot + # make_combined_plot(name, p_values, times1, cost3s) + # make_convergence_plot(name, p_values, times1, cost3s) + make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds) diff --git a/gtsam_unstable/timing/timeShonanAveraging.cpp b/gtsam_unstable/timing/timeShonanAveraging.cpp new file mode 100644 index 000000000..795961aef --- /dev/null +++ b/gtsam_unstable/timing/timeShonanAveraging.cpp @@ -0,0 +1,182 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date September 2019 + * @author Jing Wu + * @author Frank Dellaert + * @brief Timing script for Shonan Averaging algorithm + */ + +#include "gtsam/base/Matrix.h" +#include "gtsam/base/Vector.h" +#include "gtsam/geometry/Point3.h" +#include "gtsam/geometry/Rot3.h" +#include +#include + +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +// string g2oFile = findExampleDataFile("toyExample.g2o"); + +// save a single line of timing info to an output stream +void saveData(size_t p, double time1, double costP, double cost3, double time2, + double min_eigenvalue, double suBound, std::ostream* os) { + *os << (int)p << "\t" << time1 << "\t" << costP << "\t" << cost3 << "\t" + << time2 << "\t" << min_eigenvalue << "\t" << suBound << endl; +} + +void checkR(const Matrix& R) { + Matrix R2 = R.transpose(); + Matrix actual_R = R2 * R; + assert_equal(Rot3(),Rot3(actual_R)); +} + +void saveResult(string name, const Values& values) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".dat"); + size_t nrSO3 = values.count(); + myfile << "#Type SO3 Number " << nrSO3 << "\n"; + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << i; + for (int m = 0; m < 3; ++m) { + for (int n = 0; n < 3; ++n) { + myfile << " " << R(m, n); + } + } + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.dat file" << endl; +} + +void saveG2oResult(string name, const Values& values, std::map poses) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".g2o"); + size_t nrSO3 = values.count(); + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << "VERTEX_SE3:QUAT" << " "; + myfile << i << " "; + myfile << poses[i].x() << " " << poses[i].y() << " " << poses[i].z() << " "; + Vector quaternion = Rot3(R).quaternion(); + myfile << quaternion(3) << " " << quaternion(2) << " " << quaternion(1) << " " << quaternion(0); + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.g2o file" << endl; +} + +void saveResultQuat(const Values& values) { + ofstream myfile; + myfile.open("shonan_result.dat"); + size_t nrSOn = values.count(); + for (int i = 0; i < nrSOn; ++i) { + 
GTSAM_PRINT(values.at(i)); + Rot3 R = Rot3(values.at(i).matrix()); + float x = R.toQuaternion().x(); + float y = R.toQuaternion().y(); + float z = R.toQuaternion().z(); + float w = R.toQuaternion().w(); + myfile << "QuatSO3 " << i; + myfile << "QuatSO3 " << i << " " << w << " " << x << " " << y << " " << z << "\n"; + myfile.close(); + } +} + +int main(int argc, char* argv[]) { + // primitive argument parsing: + if (argc > 3) { + throw runtime_error("Usage: timeShonanAveraging [g2oFile]"); + } + + string g2oFile; + try { + if (argc > 1) + g2oFile = argv[argc - 1]; + else + g2oFile = string( + "/home/jingwu/git/SESync/data/sphere2500.g2o"); + + } catch (const exception& e) { + cerr << e.what() << '\n'; + exit(1); + } + + // Create a csv output file + size_t pos1 = g2oFile.find("data/"); + size_t pos2 = g2oFile.find(".g2o"); + string name = g2oFile.substr(pos1 + 5, pos2 - pos1 - 5); + cout << name << endl; + ofstream csvFile("shonan_timing_of_" + name + ".csv"); + + // Create Shonan averaging instance from the file. + // ShonanAveragingParameters parameters; + // double sigmaNoiseInRadians = 0 * M_PI / 180; + // parameters.setNoiseSigma(sigmaNoiseInRadians); + static const ShonanAveraging3 kShonan(g2oFile); + + // increase p value and try optimize using Shonan Algorithm. use chrono for + // timing + size_t pMin = 3; + Values Qstar; + Vector minEigenVector; + double CostP = 0, Cost3 = 0, lambdaMin = 0, suBound = 0; + cout << "(int)p" << "\t" << "time1" << "\t" << "costP" << "\t" << "cost3" << "\t" + << "time2" << "\t" << "MinEigenvalue" << "\t" << "SuBound" << endl; + + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + + for (size_t p = pMin; p < 6; p++) { + // Randomly initialize at lowest level, initialize by line search after that + const Values initial = + (p > pMin) ? 
kShonan.initializeWithDescent(p, Qstar, minEigenVector, + lambdaMin) + : ShonanAveraging::LiftTo(pMin, randomRotations); + chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); + // optimizing + const Values result = kShonan.tryOptimizingAt(p, initial); + chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); + chrono::duration timeUsed1 = + chrono::duration_cast>(t2 - t1); + lambdaMin = kShonan.computeMinEigenValue(result, &minEigenVector); + chrono::steady_clock::time_point t3 = chrono::steady_clock::now(); + chrono::duration timeUsed2 = + chrono::duration_cast>(t3 - t1); + Qstar = result; + CostP = kShonan.costAt(p, result); + const Values SO3Values = kShonan.roundSolution(result); + Cost3 = kShonan.cost(SO3Values); + suBound = (Cost3 - CostP) / CostP; + + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &cout); + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &csvFile); + } + saveResult(name, kShonan.roundSolution(Qstar)); + // saveG2oResult(name, kShonan.roundSolution(Qstar), kShonan.Poses()); + return 0; +} diff --git a/tests/testExpressionFactor.cpp b/tests/testExpressionFactor.cpp index d33c7ba1d..e3e37e7c7 100644 --- a/tests/testExpressionFactor.cpp +++ b/tests/testExpressionFactor.cpp @@ -630,6 +630,103 @@ TEST(ExpressionFactor, MultiplyWithInverseFunction) { EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-5); } + +/* ************************************************************************* */ +// Test N-ary variadic template +class TestNaryFactor + : public gtsam::ExpressionFactorN { +private: + using This = TestNaryFactor; + using Base = + gtsam::ExpressionFactorN; + +public: + /// default constructor + TestNaryFactor() = default; + ~TestNaryFactor() override = default; + + TestNaryFactor(gtsam::Key kR1, gtsam::Key kV1, gtsam::Key kR2, gtsam::Key kV2, + const gtsam::SharedNoiseModel &model, const gtsam::Point3& measured) + : Base({kR1, kV1, kR2, kV2}, model, measured) { + this->initialize(expression({kR1, kV1, kR2, kV2})); + } + + /// @return a deep copy of this factor + gtsam::NonlinearFactor::shared_ptr clone() const override { + return boost::static_pointer_cast( + gtsam::NonlinearFactor::shared_ptr(new This(*this))); + } + + // Return measurement expression + gtsam::Expression expression( + const std::array &keys) const override { + gtsam::Expression R1_(keys[0]); + gtsam::Expression V1_(keys[1]); + gtsam::Expression R2_(keys[2]); + gtsam::Expression V2_(keys[3]); + return {gtsam::rotate(R1_, V1_) - gtsam::rotate(R2_, V2_)}; + } + + /** print */ + void print(const std::string &s, + const gtsam::KeyFormatter &keyFormatter = + gtsam::DefaultKeyFormatter) const override { + std::cout << s << "TestNaryFactor(" + << keyFormatter(Factor::keys_[0]) << "," + << keyFormatter(Factor::keys_[1]) << "," + << keyFormatter(Factor::keys_[2]) << "," + << keyFormatter(Factor::keys_[3]) << ")\n"; + gtsam::traits::Print(measured_, " measured: "); + this->noiseModel_->print(" noise model: "); + } + + /** equals */ + bool equals(const gtsam::NonlinearFactor &expected, + double tol = 1e-9) const override { + const This *e = dynamic_cast(&expected); + return e != nullptr && Base::equals(*e, tol) && + gtsam::traits::Equals(measured_,e->measured_, tol); + } + +private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + ar &boost::serialization::make_nvp( + "TestNaryFactor", + 
boost::serialization::base_object(*this)); + ar &BOOST_SERIALIZATION_NVP(measured_); + } +}; + +TEST(ExpressionFactor, variadicTemplate) { + using gtsam::symbol_shorthand::R; + using gtsam::symbol_shorthand::V; + + // Create factor + TestNaryFactor f(R(0),V(0), R(1), V(1), noiseModel::Unit::Create(3), Point3(0,0,0)); + + // Create some values + Values values; + values.insert(R(0), Rot3::Ypr(0.1, 0.2, 0.3)); + values.insert(V(0), Point3(1, 2, 3)); + values.insert(R(1), Rot3::Ypr(0.2, 0.5, 0.2)); + values.insert(V(1), Point3(5, 6, 7)); + + // Check unwhitenedError + std::vector H(4); + Vector actual = f.unwhitenedError(values, H); + EXPECT_LONGS_EQUAL(4, H.size()); + EXPECT(assert_equal(Eigen::Vector3d(-5.63578115, -4.85353243, -1.4801204), actual, 1e-5)); + + EXPECT_CORRECT_FACTOR_JACOBIANS(f, values, 1e-8, 1e-5); +} + + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/tests/testManifold.cpp b/tests/testManifold.cpp index d0b4a1ffa..ac3a09e36 100644 --- a/tests/testManifold.cpp +++ b/tests/testManifold.cpp @@ -149,34 +149,6 @@ TEST(Manifold, DefaultChart) { EXPECT(assert_equal((Vector) Z_3x1, traits::Local(R, R))); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -//****************************************************************************** -typedef ProductManifold MyPoint2Pair; - -// Define any direct product group to be a model of the multiplicative Group concept -namespace gtsam { -template<> struct traits : internal::ManifoldTraits { - static void Print(const MyPoint2Pair& m, const string& s = "") { - cout << s << "(" << m.first << "," << m.second << ")" << endl; - } - static bool Equals(const MyPoint2Pair& m1, const MyPoint2Pair& m2, double tol = 1e-8) { - return m1 == m2; - } -}; -} - -TEST(Manifold, ProductManifold) { - BOOST_CONCEPT_ASSERT((IsManifold)); - MyPoint2Pair pair1(Point2(0,0),Point2(0,0)); - Vector4 d; - d << 1,2,3,4; - MyPoint2Pair expected(Point2(1,2),Point2(3,4)); - MyPoint2Pair pair2 = pair1.retract(d); - EXPECT(assert_equal(expected,pair2,1e-9)); - EXPECT(assert_equal(d, pair1.localCoordinates(pair2),1e-9)); -} -#endif - //****************************************************************************** int main() { TestResult tr; diff --git a/tests/testNonlinearISAM.cpp b/tests/testNonlinearISAM.cpp index 974806612..4249f5bc4 100644 --- a/tests/testNonlinearISAM.cpp +++ b/tests/testNonlinearISAM.cpp @@ -308,7 +308,7 @@ TEST(testNonlinearISAM, loop_closures ) { // check if edge const auto betweenPose = parseEdge(is, tag); if (betweenPose) { - Key id1, id2; + size_t id1, id2; tie(id1, id2) = betweenPose->first; graph.emplace_shared >(Symbol('x', id2), Symbol('x', id1), betweenPose->second, model); diff --git a/tests/testTranslationRecovery.cpp b/tests/testTranslationRecovery.cpp index 5a98c3bf5..eb34ba803 100644 --- a/tests/testTranslationRecovery.cpp +++ b/tests/testTranslationRecovery.cpp @@ -49,15 +49,17 @@ TEST(TranslationRecovery, BAL) { poses, {{0, 1}, {0, 2}, {1, 2}}); // Check - const Pose3 wTa = poses.at(0), wTb = poses.at(1), - wTc = poses.at(2); - const Point3 Ta = wTa.translation(), Tb = wTb.translation(), - Tc = wTc.translation(); - const Rot3 aRw = wTa.rotation().inverse(); - const Unit3 w_aZb = relativeTranslations.at({0, 1}); - EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb)); - const Unit3 w_aZc = relativeTranslations.at({0, 2}); - EXPECT(assert_equal(Unit3(Tc - Ta), w_aZc)); + Unit3 w_aZb_stored; // measurement between 0 and 1 stored for next unit test + for(auto& unitTranslation : 
relativeTranslations) { + const Pose3 wTa = poses.at(unitTranslation.key1()), + wTb = poses.at(unitTranslation.key2()); + const Point3 Ta = wTa.translation(), Tb = wTb.translation(); + const Unit3 w_aZb = unitTranslation.measured(); + EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb)); + if(unitTranslation.key1() == 0 && unitTranslation.key2() == 1) { + w_aZb_stored = unitTranslation.measured(); + } + } TranslationRecovery algorithm(relativeTranslations); const auto graph = algorithm.buildGraph(); @@ -69,10 +71,14 @@ TEST(TranslationRecovery, BAL) { // Check result for first two translations, determined by prior EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); - EXPECT(assert_equal(Point3(2 * w_aZb.point3()), result.at(1))); + EXPECT(assert_equal(Point3(2 * w_aZb_stored.point3()), result.at(1))); // Check that the third translations is correct - Point3 expected = (Tc - Ta) * (scale / (Tb - Ta).norm()); + Point3 Ta = poses.at(0).translation(); + Point3 Tb = poses.at(1).translation(); + Point3 Tc = poses.at(2).translation(); + Point3 expected = + (Tc - Ta) * (scale / (Tb - Ta).norm()); EXPECT(assert_equal(expected, result.at(2), 1e-4)); // TODO(frank): how to get stats back? diff --git a/timing/timeFrobeniusFactor.cpp b/timing/timeShonanFactor.cpp similarity index 70% rename from timing/timeFrobeniusFactor.cpp rename to timing/timeShonanFactor.cpp index 8bd754de6..207d54a4d 100644 --- a/timing/timeFrobeniusFactor.cpp +++ b/timing/timeShonanFactor.cpp @@ -10,15 +10,14 @@ * -------------------------------------------------------------------------- */ /** - * @file timeFrobeniusFactor.cpp - * @brief time FrobeniusFactor with BAL file + * @file timeShonanFactor.cpp + * @brief time ShonanFactor with BAL file * @author Frank Dellaert - * @date June 6, 2015 + * @date 2019 */ #include #include -#include #include #include #include @@ -28,7 +27,7 @@ #include #include #include -#include +#include #include #include @@ -43,7 +42,7 @@ static SharedNoiseModel gNoiseModel = noiseModel::Unit::Create(2); int main(int argc, char* argv[]) { // primitive argument parsing: if (argc > 3) { - throw runtime_error("Usage: timeFrobeniusFactor [g2oFile]"); + throw runtime_error("Usage: timeShonanFactor [g2oFile]"); } string g2oFile; @@ -51,30 +50,28 @@ int main(int argc, char* argv[]) { if (argc > 1) g2oFile = argv[argc - 1]; else - g2oFile = - "/Users/dellaert/git/2019c-notes-shonanrotationaveraging/matlabCode/" - "datasets/randomTorus3D.g2o"; - // g2oFile = findExampleDataFile("sphere_smallnoise.graph"); + g2oFile = findExampleDataFile("sphere_smallnoise.graph"); } catch (const exception& e) { cerr << e.what() << '\n'; exit(1); } // Read G2O file - const auto factors = parse3DFactors(g2oFile); - const auto poses = parse3DPoses(g2oFile); + const auto measurements = parseMeasurements(g2oFile); + const auto poses = parseVariables(g2oFile); // Build graph NonlinearFactorGraph graph; - // graph.add(NonlinearEquality(0, SO4())); + // graph.add(NonlinearEquality(0, SOn::identity(4))); auto priorModel = noiseModel::Isotropic::Sigma(6, 10000); - graph.add(PriorFactor(0, SO4(), priorModel)); - for (const auto& factor : factors) { - const auto& keys = factor->keys(); - const auto& Tij = factor->measured(); - const auto& model = factor->noiseModel(); - graph.emplace_shared( - keys[0], keys[1], Rot3(Tij.rotation().matrix()), 4, model); + graph.add(PriorFactor(0, SOn::identity(4), priorModel)); + auto G = boost::make_shared(SOn::VectorizedGenerators(4)); + for (const auto &m : measurements) { + const auto &keys = m.keys(); + const 
Rot3 &Rij = m.measured(); + const auto &model = m.noiseModel(); + graph.emplace_shared( + keys[0], keys[1], Rij, 4, model, G); } std::mt19937 rng(42); @@ -95,9 +92,9 @@ int main(int argc, char* argv[]) { for (size_t i = 0; i < 100; i++) { gttic_(optimize); Values initial; - initial.insert(0, SO4()); + initial.insert(0, SOn::identity(4)); for (size_t j = 1; j < poses.size(); j++) { - initial.insert(j, SO4::Random(rng)); + initial.insert(j, SOn::Random(rng, 4)); } LevenbergMarquardtOptimizer lm(graph, initial, params); Values result = lm.optimize(); diff --git a/wrap/Module.cpp b/wrap/Module.cpp index ec02dc412..780c6f8da 100644 --- a/wrap/Module.cpp +++ b/wrap/Module.cpp @@ -248,7 +248,7 @@ void Module::parseMarkup(const std::string& data) { // Create type attributes table and check validity typeAttributes.addClasses(expandedClasses); typeAttributes.addForwardDeclarations(forward_declarations); - for (const TypedefPair p: typedefs) + for (const TypedefPair& p: typedefs) typeAttributes.addType(p.newType); // add Eigen types as template arguments are also checked ? vector eigen;