Merge remote-tracking branch 'upstream/develop' into sim3

release/4.3a0
alexma3312 2020-08-21 21:13:59 -04:00
commit e3bf43856d
624 changed files with 32404 additions and 18128 deletions

.github/scripts/python.sh

@ -0,0 +1,127 @@
#!/bin/bash
##########################################################
# Build and test the GTSAM Python wrapper.
##########################################################
set -x -e
# install TBB with _debug.so files
function install_tbb()
{
TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
TBB_VERSION=4.4.5
TBB_DIR=tbb44_20160526oss
TBB_SAVEPATH="/tmp/tbb.tgz"
if [ "$(uname)" == "Linux" ]; then
OS_SHORT="lin"
TBB_LIB_DIR="intel64/gcc4.4"
SUDO="sudo"
elif [ "$(uname)" == "Darwin" ]; then
OS_SHORT="osx"
TBB_LIB_DIR=""
SUDO=""
fi
wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH
tar -C /tmp -xf $TBB_SAVEPATH
TBBROOT=/tmp/$TBB_DIR
# Copy the needed files to the correct places.
# This works correctly for CI builds, instead of setting path variables.
# This is what Homebrew does to install TBB on Macs
$SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
$SUDO cp -R $TBBROOT/include/ /usr/local/include/
}
if [ -z ${PYTHON_VERSION+x} ]; then
echo "Please provide the Python version to build against!"
exit 127
fi
if [ -z ${WRAPPER+x} ]; then
echo "Please provide the wrapper to build!"
exit 126
fi
PYTHON="python${PYTHON_VERSION}"
if [[ $(uname) == "Darwin" ]]; then
brew install wget
else
# Install a system package required by our library
sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
fi
PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
case $WRAPPER in
"cython")
BUILD_CYTHON="ON"
BUILD_PYBIND="OFF"
TYPEDEF_POINTS_TO_VECTORS="OFF"
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/cython/requirements.txt
;;
"pybind")
BUILD_CYTHON="OFF"
BUILD_PYBIND="ON"
TYPEDEF_POINTS_TO_VECTORS="ON"
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
;;
*)
exit 126
;;
esac
mkdir $GITHUB_WORKSPACE/build
cd $GITHUB_WORKSPACE/build
cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \
-DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
-DGTSAM_USE_QUATERNIONS=OFF \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=${BUILD_CYTHON} \
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
-DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
start=$(date +%s)  # record the start time; `start` is read by the progress loop below
make -j$(nproc) install &
while ps -p $! > /dev/null
do
sleep 60
now=$(date +%s)
printf "%d seconds have elapsed\n" $(( (now - start) ))
done
case $WRAPPER in
"cython")
cd $GITHUB_WORKSPACE/build/cython
$PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/build/cython/gtsam/tests
$PYTHON -m unittest discover
;;
"pybind")
cd $GITHUB_WORKSPACE/build/python
$PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover
;;
*)
echo "THIS SHOULD NEVER HAPPEN!"
exit 125
;;
esac
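The workflows added later in this changeset drive this script entirely through environment variables (`PYTHON_VERSION`, `WRAPPER`, and optionally `GTSAM_WITH_TBB`) and run it from the repository root. A minimal local invocation mirroring `build-python.yml` might look like the sketch below; the concrete values are only illustrative.

```bash
# Sketch: reproduce the Python CI run locally (values are illustrative).
export PYTHON_VERSION=3        # required, checked at the top of the script
export WRAPPER=pybind          # required, "cython" or "pybind"
export GTSAM_WITH_TBB=OFF      # optional, ON exercises the install_tbb path
export GITHUB_WORKSPACE=$PWD   # the script uses this as the source/build root
bash .github/scripts/python.sh
```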

.travis.sh → .github/scripts/unix.sh (executable → normal file)

@ -1,20 +1,25 @@
#!/bin/bash
##########################################################
# Build and test GTSAM for *nix based systems.
# Specifically Linux and macOS.
##########################################################
# install TBB with _debug.so files
function install_tbb()
{
TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
TBB_VERSION=4.4.2
TBB_DIR=tbb44_20151115oss
TBB_VERSION=4.4.5
TBB_DIR=tbb44_20160526oss
TBB_SAVEPATH="/tmp/tbb.tgz"
if [ "$TRAVIS_OS_NAME" == "linux" ]; then
if [ "$(uname)" == "Linux" ]; then
OS_SHORT="lin"
TBB_LIB_DIR="intel64/gcc4.4"
SUDO="sudo"
elif [ "$TRAVIS_OS_NAME" == "osx" ]; then
OS_SHORT="lin"
elif [ "$(uname)" == "Darwin" ]; then
OS_SHORT="osx"
TBB_LIB_DIR=""
SUDO=""
@ -25,7 +30,7 @@ function install_tbb()
TBBROOT=/tmp/$TBB_DIR
# Copy the needed files to the correct places.
# This works correctly for travis builds, instead of setting path variables.
# This works correctly for CI builds, instead of setting path variables.
# This is what Homebrew does to install TBB on Macs
$SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
$SUDO cp -R $TBBROOT/include/ /usr/local/include/
@ -38,15 +43,14 @@ function configure()
set -e # Make sure any error makes the script to return an error code
set -x # echo
SOURCE_DIR=`pwd`
BUILD_DIR=build
SOURCE_DIR=$GITHUB_WORKSPACE
BUILD_DIR=$GITHUB_WORKSPACE/build
#env
git clean -fd || true
rm -fr $BUILD_DIR || true
mkdir $BUILD_DIR && cd $BUILD_DIR
install_tbb
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
if [ ! -z "$GCC_VERSION" ]; then
export CC=gcc-$GCC_VERSION
@ -59,11 +63,13 @@ function configure()
-DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DCMAKE_VERBOSE_MAKEFILE=OFF
-DBOOST_ROOT=$BOOST_ROOT \
-DBoost_NO_SYSTEM_PATHS=ON \
-DBoost_ARCHITECTURE=-x64
}
@ -71,7 +77,7 @@ function configure()
function finish ()
{
# Print ccache stats
ccache -s
[ -x "$(command -v ccache)" ] && ccache -s
cd $SOURCE_DIR
}
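As with the Python script, the workflows below call this one with the configuration passed through the environment plus a single stage flag; the `-b`/`-t` convention is carried over from the old `.travis.sh`. A rough local equivalent (compiler choice and flags are illustrative):

```bash
# Sketch: run the build-and-test stage the way build-linux.yml does.
export CC=gcc-9 CXX=g++-9         # illustrative compiler pair
export CMAKE_BUILD_TYPE=Release
export GTSAM_BUILD_UNSTABLE=ON
export GTSAM_WITH_TBB=OFF         # install_tbb now only runs when this is ON
export GITHUB_WORKSPACE=$PWD      # configure() uses this as SOURCE_DIR
bash .github/scripts/unix.sh -t   # -t: build and test; -b: build only (as in the old .travis.yml)
```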

.github/workflows/build-linux.yml

@ -0,0 +1,78 @@
name: Linux CI
on: [push, pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
compiler: gcc
version: "5"
- name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
compiler: gcc
version: "9"
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
compiler: clang
version: "9"
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (Linux)
if: runner.os == 'Linux'
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt install cmake build-essential pkg-config libpython-dev python-numpy
echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)"
echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)"
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
fi
- name: Check Boost version
if: runner.os == 'Linux'
run: |
echo "BOOST_ROOT = $BOOST_ROOT"
- name: Build and Test (Linux)
if: runner.os == 'Linux'
run: |
bash .github/scripts/unix.sh -t

.github/workflows/build-macos.yml

@ -0,0 +1,51 @@
name: macOS CI
on: [pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
macOS-10.15-xcode-11.3.1,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: macOS-10.15-xcode-11.3.1
os: macOS-10.15
compiler: xcode
version: "11.3.1"
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (macOS)
if: runner.os == 'macOS'
run: |
brew install cmake ninja boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "::set-env name=CXX::clang++"
fi
- name: Build and Test (macOS)
if: runner.os == 'macOS'
run: |
bash .github/scripts/unix.sh -t

.github/workflows/build-python.yml

@ -0,0 +1,108 @@
name: Python CI
on: [pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
PYTHON_VERSION: ${{ matrix.python_version }}
WRAPPER: ${{ matrix.wrapper }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
# ubuntu-18.04-gcc-9, # TODO Disabled for now because of timeouts
ubuntu-18.04-clang-9,
macOS-10.15-xcode-11.3.1,
ubuntu-18.04-gcc-5-tbb,
]
build_type: [Debug, Release]
python_version: [3]
wrapper: [pybind]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
compiler: gcc
version: "5"
# TODO Disabled for now because of timeouts
# - name: ubuntu-18.04-gcc-9
# os: ubuntu-18.04
# compiler: gcc
# version: "9"
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
compiler: clang
version: "9"
- name: macOS-10.15-xcode-11.3.1
os: macOS-10.15
compiler: xcode
version: "11.3.1"
- name: ubuntu-18.04-gcc-5-tbb
os: ubuntu-18.04
compiler: gcc
version: "5"
flag: tbb
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (Linux)
if: runner.os == 'Linux'
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
fi
- name: Install (macOS)
if: runner.os == 'macOS'
run: |
brew install cmake ninja boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "::set-env name=CXX::clang++"
fi
- name: Set GTSAM_WITH_TBB Flag
if: matrix.flag == 'tbb'
run: |
echo "::set-env name=GTSAM_WITH_TBB::ON"
echo "GTSAM Uses TBB"
- name: Build (Linux)
if: runner.os == 'Linux'
run: |
bash .github/scripts/python.sh
- name: Build (macOS)
if: runner.os == 'macOS'
run: |
bash .github/scripts/python.sh

.github/workflows/build-special.yml

@ -0,0 +1,112 @@
name: Special Cases CI
on: [pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ON
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name:
[
ubuntu-gcc-deprecated,
ubuntu-gcc-quaternions,
ubuntu-gcc-tbb,
]
build_type: [Debug, Release]
include:
- name: ubuntu-gcc-deprecated
os: ubuntu-18.04
compiler: gcc
version: "9"
flag: deprecated
- name: ubuntu-gcc-quaternions
os: ubuntu-18.04
compiler: gcc
version: "9"
flag: quaternions
- name: ubuntu-gcc-tbb
os: ubuntu-18.04
compiler: gcc
version: "9"
flag: tbb
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (Linux)
if: runner.os == 'Linux'
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt install cmake build-essential pkg-config libpython-dev python-numpy
echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)"
echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)"
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
fi
- name: Install (macOS)
if: runner.os == 'macOS'
run: |
brew install cmake ninja boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "::set-env name=CXX::clang++"
fi
- name: Set Allow Deprecated Flag
if: matrix.flag == 'deprecated'
run: |
echo "::set-env name=GTSAM_ALLOW_DEPRECATED_SINCE_V41::ON"
echo "Allow deprecated since version 4.1"
- name: Set Use Quaternions Flag
if: matrix.flag == 'quaternions'
run: |
echo "::set-env name=GTSAM_USE_QUATERNIONS::ON"
echo "Use Quaternions for rotations"
- name: Set GTSAM_WITH_TBB Flag
if: matrix.flag == 'tbb'
run: |
echo "::set-env name=GTSAM_WITH_TBB::ON"
echo "GTSAM Uses TBB"
- name: Build & Test
run: |
bash .github/scripts/unix.sh -t

.github/workflows/build-windows.yml

@ -0,0 +1,75 @@
name: Windows CI
on: [pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
windows-2016-cl,
windows-2019-cl,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: windows-2016-cl
os: windows-2016
compiler: cl
- name: windows-2019-cl
os: windows-2019
compiler: cl
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (Windows)
if: runner.os == 'Windows'
shell: powershell
run: |
Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
scoop install ninja --global
if ("${{ matrix.compiler }}".StartsWith("clang")) {
scoop install llvm --global
}
if ("${{ matrix.compiler }}" -eq "gcc") {
# Chocolatey GCC is broken on the windows-2019 image.
# See: https://github.com/DaanDeMeyer/doctest/runs/231595515
# See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413
scoop install gcc --global
echo "::set-env name=CC::gcc"
echo "::set-env name=CXX::g++"
} elseif ("${{ matrix.compiler }}" -eq "clang") {
echo "::set-env name=CC::clang"
echo "::set-env name=CXX::clang++"
} else {
echo "::set-env name=CC::${{ matrix.compiler }}"
echo "::set-env name=CXX::${{ matrix.compiler }}"
}
# Scoop modifies the PATH so we make the modified PATH global.
echo "::set-env name=PATH::$env:PATH"
- name: Build (Windows)
if: runner.os == 'Windows'
run: |
cmake -E remove_directory build
echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}"
cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib"
cmake --build build --config ${{ matrix.build_type }} --target gtsam
cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build --config ${{ matrix.build_type }} --target wrap
cmake --build build --config ${{ matrix.build_type }} --target check.base
cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable
cmake --build build --config ${{ matrix.build_type }} --target check.linear


@ -1,6 +1,9 @@
# This triggers Cython builds on `gtsam-manylinux-build`
name: Trigger Python Builds
on: push
on:
push:
branches:
- develop
jobs:
triggerCython:
runs-on: ubuntu-latest

.gitignore

@ -21,3 +21,4 @@ cython/gtsam_wrapper.pxd
/CMakeSettings.json
# for QtCreator:
CMakeLists.txt.user*
xcode/


@ -1,43 +0,0 @@
#!/bin/bash
set -x -e
if [ -z ${PYTHON_VERSION+x} ]; then
echo "Please provide the Python version to build against!"
exit 127
fi
PYTHON="python${PYTHON_VERSION}"
if [[ $(uname) == "Darwin" ]]; then
brew install wget
else
# Install a system package required by our library
sudo apt-get install wget libicu-dev python3-pip python3-setuptools
fi
CURRDIR=$(pwd)
sudo $PYTHON -m pip install -r ./cython/requirements.txt
mkdir $CURRDIR/build
cd $CURRDIR/build
cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \
-DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
-DGTSAM_USE_QUATERNIONS=OFF \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
-DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install
make -j$(nproc) install
cd cython
sudo $PYTHON setup.py install
cd $CURRDIR/cython/gtsam/tests
$PYTHON -m unittest discover


@ -1,140 +0,0 @@
language: cpp
cache: ccache
dist: xenial
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main'
key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
packages:
- g++-9
- clang-9
- build-essential pkg-config
- cmake
- python3-dev libpython-dev
- python3-numpy
- libboost-all-dev
# before_install:
# - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi
# We first do the compile stage specified below, then the matrix expansion specified after.
stages:
- compile
- test
- special
env:
global:
- MAKEFLAGS="-j3"
- CCACHE_SLOPPINESS=pch_defines,time_macros
# Compile stage without building examples/tests to populate the caches.
jobs:
# -------- STAGE 1: COMPILE -----------
include:
# on Mac, GCC
- stage: compile
os: osx
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: osx
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Mac, CLANG
- stage: compile
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, GCC
- stage: compile
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, CLANG
- stage: compile
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, with deprecated ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON
script: bash .travis.sh -b
# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests
- stage: special
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON
script: bash .travis.sh -t
# -------- STAGE 2: TESTS -----------
# on Mac, GCC
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
# on Linux, with quaternions ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON
script: bash .travis.sh -t
- stage: special
os: linux
compiler: gcc
env: PYTHON_VERSION=3
script: bash .travis.python.sh
- stage: special
os: osx
compiler: clang
env: PYTHON_VERSION=3
script: bash .travis.python.sh


@ -65,6 +65,8 @@ add_custom_target(uninstall
# Configurable Options
if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
@ -75,8 +77,8 @@ option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TB
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
if(NOT MSVC AND NOT XCODE_VERSION)
@ -99,37 +101,40 @@ endif()
# Options relating to MATLAB wrapper
# TODO: Check for matlab mex binary before handling building of binaries
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_INSTALL_CYTHON_TOOLBOX "Enable/Disable installation of Cython toolbox" OFF)
option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON)
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper for (or Default)")
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.")
# Check / set dependent variables for MATLAB wrapper
if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP")
endif()
if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES)
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_TYPE_POSTFIXES)
set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX})
endif()
if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP)
message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP")
endif()
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.")
endif()
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. Please turn one of the two options off.")
if(GTSAM_BUILD_PYTHON)
if(GTSAM_UNSTABLE_BUILD_PYTHON)
if (NOT GTSAM_BUILD_UNSTABLE)
message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.")
set(GTSAM_UNSTABLE_BUILD_PYTHON OFF)
endif()
endif()
if(GTSAM_INSTALL_CYTHON_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS)
message(FATAL_ERROR "GTSAM_INSTALL_CYTHON_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the CYTHON toolbox cannot deal with this yet. Please turn one of the two options off.")
set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python")
endif()
# Flags for choosing default packaging tools
set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator")
set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
if (CMAKE_GENERATOR STREQUAL "Ninja" AND
((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR
(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5)))
# Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support.
# Rationale in https://github.com/ninja-build/ninja/issues/814
add_compile_options(-fdiagnostics-color=always)
endif()
###############################################################################
# Find boost
@ -419,14 +424,11 @@ endif()
# Build CppUnitLite
add_subdirectory(CppUnitLite)
# Build wrap
if (GTSAM_BUILD_WRAP)
add_subdirectory(wrap)
# suppress warning of cython line being too long
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-misleading-indentation")
# This is the new wrapper
if(GTSAM_BUILD_PYTHON)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake")
add_subdirectory(python)
endif()
endif(GTSAM_BUILD_WRAP)
# Build GTSAM library
add_subdirectory(gtsam)
@ -450,20 +452,6 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX)
add_subdirectory(matlab)
endif()
# Cython wrap
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
# Set up cache options
# Cython install path appended with Build type (e.g. cython, cythonDebug, etc).
# This does not override custom values set from the command line
set(GTSAM_CYTHON_INSTALL_PATH "${PROJECT_BINARY_DIR}/cython${GTSAM_BUILD_TAG}" CACHE PATH "Cython toolbox destination, blank defaults to PROJECT_BINARY_DIR/cython<GTSAM_BUILD_TAG>")
set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
add_subdirectory(cython ${GTSAM_CYTHON_INSTALL_PATH})
else()
set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
endif()
# Install config and export files
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)
@ -509,105 +497,105 @@ set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>=
# Print configuration variables
message(STATUS "===============================================================")
message(STATUS "================ Configuration Options ======================")
message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}")
message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}")
message(STATUS " CMake version : ${CMAKE_VERSION}")
message(STATUS " CMake generator : ${CMAKE_GENERATOR}")
message(STATUS " CMake build tool : ${CMAKE_BUILD_TOOL}")
print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}")
print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}")
print_config("CMake version" "${CMAKE_VERSION}")
print_config("CMake generator" "${CMAKE_GENERATOR}")
print_config("CMake build tool" "${CMAKE_BUILD_TOOL}")
message(STATUS "Build flags ")
print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ")
print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests")
print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'")
print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
if (DOXYGEN_FOUND)
print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ")
print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs")
endif()
print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ")
print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ")
print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries")
print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name")
if(GTSAM_UNSTABLE_AVAILABLE)
print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ")
print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
message(STATUS " Build type : ${CMAKE_BUILD_TYPE}")
message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
print_config("Build type" "${CMAKE_BUILD_TYPE}")
print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
endif()
print_build_options_for_target(gtsam)
message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
if(GTSAM_USE_TBB)
message(STATUS " Use Intel TBB : Yes")
print_config("Use Intel TBB" "Yes")
elseif(TBB_FOUND)
message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
print_config("Use Intel TBB" "TBB found but GTSAM_WITH_TBB is disabled")
else()
message(STATUS " Use Intel TBB : TBB not found")
print_config("Use Intel TBB" "TBB not found")
endif()
if(GTSAM_USE_EIGEN_MKL)
message(STATUS " Eigen will use MKL : Yes")
print_config("Eigen will use MKL" "Yes")
elseif(MKL_FOUND)
message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
else()
message(STATUS " Eigen will use MKL : MKL not found")
print_config("Eigen will use MKL" "MKL not found")
endif()
if(GTSAM_USE_EIGEN_MKL_OPENMP)
message(STATUS " Eigen will use MKL and OpenMP : Yes")
print_config("Eigen will use MKL and OpenMP" "Yes")
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found")
elseif(OPENMP_FOUND)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
else()
message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
print_config("Eigen will use MKL and OpenMP" "OpenMP not found")
endif()
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}")
if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
message(STATUS " Cheirality exceptions enabled : YES")
print_config("Cheirality exceptions enabled" "YES")
else()
message(STATUS " Cheirality exceptions enabled : NO")
print_config("Cheirality exceptions enabled" "NO")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
message(STATUS " Build with ccache : Yes")
print_config("Build with ccache" "Yes")
elseif(CCACHE_FOUND)
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
else()
message(STATUS " Build with ccache : No")
print_config("Build with ccache" "No")
endif()
endif()
message(STATUS "Packaging flags")
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
message(STATUS " CPack Generator : ${CPACK_GENERATOR}")
print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}")
print_config("CPack Generator" "${CPACK_GENERATOR}")
message(STATUS "GTSAM flags ")
print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
message(STATUS "MATLAB toolbox flags")
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
message(STATUS " MATLAB root : ${MATLAB_ROOT}")
message(STATUS " MEX binary : ${MEX_COMMAND}")
print_config("MATLAB root" "${MATLAB_ROOT}")
print_config("MEX binary" "${MEX_COMMAND}")
endif()
message(STATUS "Python toolbox flags ")
print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ")
if(GTSAM_BUILD_PYTHON)
print_config("Python version" ${GTSAM_PYTHON_VERSION})
endif()
message(STATUS "Cython toolbox flags ")
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
if(GTSAM_INSTALL_CYTHON_TOOLBOX)
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
endif()
message(STATUS "===============================================================")
# Print warnings at the end


@ -23,3 +23,5 @@ ordering library
- Included unmodified in gtsam/3rdparty/metis
- Licenced under Apache License v 2.0, provided in
gtsam/3rdparty/metis/LICENSE.txt
- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK.
- Licenced under MPL2, provided at https://github.com/yixuan/spectra


@ -1,5 +1,11 @@
# README - Georgia Tech Smoothing and Mapping Library
**Important Note**
As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features.
However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`.
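For reference, one way to track those uses down is to build the 4.0.3 tag with the deprecation flag turned off, so that every remaining call to a deprecated API surfaces as a compile error (a sketch; build directory and generator are up to you):

```bash
git checkout 4.0.3          # last release that still ships the deprecated API
mkdir build && cd build
cmake .. -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF
make                        # deprecated usages now show up as compile errors
```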
## What is GTSAM?
GTSAM is a C++ library that implements smoothing and
@ -7,13 +13,14 @@ mapping (SAM) in robotics and vision, using Factor Graphs and Bayes
Networks as the underlying computing paradigm rather than sparse
matrices.
| Platform | Build Status |
|:---------:|:-------------:|
| gcc/clang | [![Build Status](https://travis-ci.com/borglab/gtsam.svg?branch=develop)](https://travis-ci.com/borglab/gtsam/) |
| MSVC | [![Build status](https://ci.appveyor.com/api/projects/status/3enllitj52jsxwfg/branch/develop?svg=true)](https://ci.appveyor.com/project/dellaert/gtsam) |
| Platform | Compiler | Build Status |
|:------------:|:---------:|:-------------:|
| Ubuntu 18.04 | gcc/clang | ![Linux CI](https://github.com/borglab/gtsam/workflows/Linux%20CI/badge.svg) |
| macOS | clang | ![macOS CI](https://github.com/borglab/gtsam/workflows/macOS%20CI/badge.svg) |
| Windows | MSVC | ![Windows CI](https://github.com/borglab/gtsam/workflows/Windows%20CI/badge.svg) |
On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](##Wrappers).
On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](#wrappers).
## Quickstart


@ -1,33 +0,0 @@
# version format
version: 4.0.3-{branch}-build{build}
os: Visual Studio 2019
clone_folder: c:\projects\gtsam
platform: x64
configuration: Release
environment:
CTEST_OUTPUT_ON_FAILURE: 1
BOOST_ROOT: C:/Libraries/boost_1_71_0
build_script:
- cd c:\projects\gtsam\build
# As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just
# check that parts of GTSAM build correctly:
#- cmake --build .
- cmake --build . --config Release --target gtsam
- cmake --build . --config Release --target gtsam_unstable
- cmake --build . --config Release --target wrap
#- cmake --build . --target check
- cmake --build . --config Release --target check.base
- cmake --build . --config Release --target check.base_unstable
- cmake --build . --config Release --target check.linear
before_build:
- cd c:\projects\gtsam
- mkdir build
- cd build
# Disable examples to avoid AppVeyor timeout
- cmake -G "Visual Studio 16 2019" .. -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF


@ -17,8 +17,6 @@ install(FILES
GtsamBuildTypes.cmake
GtsamMakeConfigFile.cmake
GtsamMatlabWrap.cmake
GtsamPythonWrap.cmake
GtsamCythonWrap.cmake
GtsamTesting.cmake
GtsamPrinting.cmake
FindCython.cmake


@ -1,204 +0,0 @@
# Check Cython version, need to be >=0.25.2
# Unset these cached variables to avoid surprises when the python/cython
# in the current environment are different from the cached!
unset(PYTHON_EXECUTABLE CACHE)
unset(CYTHON_EXECUTABLE CACHE)
unset(PYTHON_INCLUDE_DIR CACHE)
unset(PYTHON_MAJOR_VERSION CACHE)
unset(PYTHON_LIBRARY CACHE)
# Allow override from command line
if(NOT DEFINED GTSAM_USE_CUSTOM_PYTHON_LIBRARY)
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
find_package(PythonInterp REQUIRED)
find_package(PythonLibs REQUIRED)
else()
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
endif()
endif()
find_package(Cython 0.25.2 REQUIRED)
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
"from __future__ import print_function;import sys;print(sys.version[0], end='')"
OUTPUT_VARIABLE PYTHON_MAJOR_VERSION
)
# User-friendly Cython wrapping and installing function.
# Builds a Cython module from the provided interface_header.
# For example, for the interface header gtsam.h,
# this will build the wrap module 'gtsam'.
#
# Arguments:
#
# interface_header: The relative path to the wrapper interface definition file.
# extra_imports: extra header to import in the Cython pxd file.
# For example, to use Cython gtsam.pxd in your own module,
# use "from gtsam cimport *"
# install_path: destination to install the library
# libs: libraries to link with
# dependencies: Dependencies which need to be built before the wrapper
function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
# Paths for generated files
get_filename_component(module_name "${interface_header}" NAME_WE)
set(generated_files_path "${install_path}")
wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
endfunction()
function(set_up_required_cython_packages)
# Set up building of cython module
include_directories(${PYTHON_INCLUDE_DIRS})
find_package(NumPy REQUIRED)
include_directories(${NUMPY_INCLUDE_DIRS})
endfunction()
# Convert pyx to cpp by executing cython
# This is the first step to compile cython from the command line
# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html
#
# Arguments:
# - target: The specified target for this step
# - pyx_file: The input pyx_file in full *absolute* path
# - generated_cpp: The output cpp file in full absolute path
# - include_dirs: Directories to include when executing cython
function(pyx_to_cpp target pyx_file generated_cpp include_dirs)
foreach(dir ${include_dirs})
set(includes_for_cython ${includes_for_cython} -I ${dir})
endforeach()
add_custom_command(
OUTPUT ${generated_cpp}
COMMAND
${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp}
VERBATIM)
add_custom_target(${target} ALL DEPENDS ${generated_cpp})
endfunction()
# Build the cpp file generated by converting pyx using cython
# This is the second step to compile cython from the command line
# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html
#
# Arguments:
# - target: The specified target for this step
# - cpp_file: The input cpp_file in full *absolute* path
# - output_lib_we: The output lib filename only (without extension)
# - output_dir: The output directory
function(build_cythonized_cpp target cpp_file output_lib_we output_dir)
add_library(${target} MODULE ${cpp_file})
if(WIN32)
# Use .pyd extension instead of .dll on Windows
set_target_properties(${target} PROPERTIES SUFFIX ".pyd")
# Add full path to the Python library
target_link_libraries(${target} ${PYTHON_LIBRARIES})
endif()
if(APPLE)
set(link_flags "-undefined dynamic_lookup")
endif()
set_target_properties(${target}
PROPERTIES COMPILE_FLAGS "-w"
LINK_FLAGS "${link_flags}"
OUTPUT_NAME ${output_lib_we}
PREFIX ""
${CMAKE_BUILD_TYPE_UPPER}_POSTFIX ""
LIBRARY_OUTPUT_DIRECTORY ${output_dir})
endfunction()
# Cythonize a pyx from the command line as described at
# http://cython.readthedocs.io/en/latest/src/reference/compilation.html
# Arguments:
# - target: The specified target
# - pyx_file: The input pyx_file in full *absolute* path
# - output_lib_we: The output lib filename only (without extension)
# - output_dir: The output directory
# - include_dirs: Directories to include when executing cython
# - libs: Libraries to link with
# - interface_header: For dependency. Any update in interface header will re-trigger cythonize
function(cythonize target pyx_file output_lib_we output_dir include_dirs libs interface_header dependencies)
get_filename_component(pyx_path "${pyx_file}" DIRECTORY)
get_filename_component(pyx_name "${pyx_file}" NAME_WE)
set(generated_cpp "${output_dir}/${pyx_name}.cpp")
set_up_required_cython_packages()
pyx_to_cpp(${target}_pyx2cpp ${pyx_file} ${generated_cpp} "${include_dirs}")
# Late dependency injection, to make sure this gets called whenever the interface header is updated
# See: https://stackoverflow.com/questions/40032593/cmake-does-not-rebuild-dependent-after-prerequisite-changes
add_custom_command(OUTPUT ${generated_cpp} DEPENDS ${interface_header} ${pyx_file} APPEND)
if (NOT "${dependencies}" STREQUAL "")
add_dependencies(${target}_pyx2cpp "${dependencies}")
endif()
build_cythonized_cpp(${target} ${generated_cpp} ${output_lib_we} ${output_dir})
if (NOT "${libs}" STREQUAL "")
target_link_libraries(${target} "${libs}")
endif()
add_dependencies(${target} ${target}_pyx2cpp)
if(TARGET ${python_install_target})
add_dependencies(${python_install_target} ${target})
endif()
endfunction()
# Internal function that wraps a library and compiles the wrapper
function(wrap_library_cython interface_header generated_files_path extra_imports libs dependencies)
# Wrap codegen interface
# Extract module path and name from interface header file name
# wrap requires interfacePath to be *absolute*
get_filename_component(interface_header "${interface_header}" ABSOLUTE)
get_filename_component(module_path "${interface_header}" PATH)
get_filename_component(module_name "${interface_header}" NAME_WE)
# Wrap module to Cython pyx
message(STATUS "Cython wrapper generating ${generated_files_path}/${module_name}.pyx")
set(generated_pyx "${generated_files_path}/${module_name}.pyx")
if(NOT EXISTS ${generated_files_path})
file(MAKE_DIRECTORY "${generated_files_path}")
endif()
add_custom_command(
OUTPUT ${generated_pyx}
DEPENDS ${interface_header} wrap
COMMAND
wrap --cython ${module_path} ${module_name} ${generated_files_path} "${extra_imports}"
VERBATIM
WORKING_DIRECTORY ${generated_files_path}/../)
add_custom_target(cython_wrap_${module_name}_pyx ALL DEPENDS ${generated_pyx})
if(NOT "${dependencies}" STREQUAL "")
add_dependencies(cython_wrap_${module_name}_pyx ${dependencies})
endif()
message(STATUS "Cythonize and build ${module_name}.pyx")
get_property(include_dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES)
cythonize(cythonize_${module_name} ${generated_pyx} ${module_name}
${generated_files_path} "${include_dirs}" "${libs}" ${interface_header} cython_wrap_${module_name}_pyx)
# distclean
add_custom_target(wrap_${module_name}_cython_distclean
COMMAND cmake -E remove_directory ${generated_files_path})
endfunction()
# Helper function to install Cython scripts and handle multiple build types where the scripts
# should be installed to all build type toolboxes
#
# Arguments:
# source_directory: The source directory to be installed. "The last component of each directory
# name is appended to the destination directory but a trailing slash may be
# used to avoid this because it leaves the last component empty."
# (https://cmake.org/cmake/help/v3.3/command/install.html?highlight=install#installing-directories)
# dest_directory: The destination directory to install to.
# patterns: list of file patterns to install
function(install_cython_scripts source_directory dest_directory patterns)
set(patterns_args "")
set(exclude_patterns "")
foreach(pattern ${patterns})
list(APPEND patterns_args PATTERN "${pattern}")
endforeach()
file(COPY "${source_directory}" DESTINATION "${dest_directory}"
FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
endfunction()


@ -23,6 +23,11 @@ else()
file(GLOB matlab_bin_directories "/usr/local/MATLAB/*/bin")
set(mex_program_name "mex")
endif()
if(GTSAM_CUSTOM_MATLAB_PATH)
set(matlab_bin_directories ${GTSAM_CUSTOM_MATLAB_PATH})
endif()
# Run find_program explicitly putting $PATH after our predefined program
# directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents
# finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is
@ -209,15 +214,34 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
# Set up generation of module source file
file(MAKE_DIRECTORY "${generated_files_path}")
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
find_package(PythonInterp REQUIRED)
find_package(PythonLibs REQUIRED)
else()
find_package(PythonInterp
${GTSAM_PYTHON_VERSION}
EXACT
REQUIRED)
find_package(PythonLibs
${GTSAM_PYTHON_VERSION}
EXACT
REQUIRED)
endif()
set(_ignore gtsam::Point2
gtsam::Point3)
add_custom_command(
OUTPUT ${generated_cpp_file}
DEPENDS ${interfaceHeader} wrap ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects}
DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects}
COMMAND
wrap --matlab
${modulePath}
${moduleName}
${generated_files_path}
${matlab_h_path}
${PYTHON_EXECUTABLE}
${CMAKE_SOURCE_DIR}/wrap/matlab_wrapper.py
--src ${interfaceHeader}
--module_name ${moduleName}
--out ${generated_files_path}
--top_module_namespaces ${moduleName}
--ignore ${_ignore}
VERBATIM
WORKING_DIRECTORY ${generated_files_path})


@ -1,14 +1,3 @@
# print configuration variables
# Usage:
#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
function(print_config_flag flag msg)
if (flag)
message(STATUS " ${msg}: Enabled")
else ()
message(STATUS " ${msg}: Disabled")
endif ()
endfunction()
# Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake
function(string_pad RESULT_NAME DESIRED_LENGTH VALUE)
string(LENGTH "${VALUE}" VALUE_LENGTH)
@ -26,6 +15,27 @@ endfunction()
set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.")
mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH)
# print configuration variables with automatic padding
# Usage:
# print_config("CMake version" "${CMAKE_VERSION}")
function(print_config config msg)
string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}")
message(STATUS "${padded_config}: ${msg}")
endfunction()
# print configuration variable with enabled/disabled value
# Usage:
# print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests ")
function(print_enabled_config config msg)
string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}")
if (config)
message(STATUS "${padded_msg}: Enabled")
else ()
message(STATUS "${padded_msg}: Disabled")
endif ()
endfunction()
# Print " var: ${var}" padding with spaces as needed
function(print_padded variable_name)
string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${variable_name}")


@ -1,102 +0,0 @@
#Setup cache options
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to chose the default version")
set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Matlab PYTHON compilation")
set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python")
if(NOT GTSAM_PYTHON_INSTALL_PATH)
set(GTSAM_PYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/borg/python")
endif()
#Author: Paul Furgale Modified by Andrew Melim
function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
# # Boost
# find_package(Boost COMPONENTS python filesystem system REQUIRED)
# include_directories(${Boost_INCLUDE_DIRS})
# # Find Python
# FIND_PACKAGE(PythonLibs 2.7 REQUIRED)
# INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS})
IF(APPLE)
# The apple framework headers don't include the numpy headers for some reason.
GET_FILENAME_COMPONENT(REAL_PYTHON_INCLUDE ${PYTHON_INCLUDE_DIRS} REALPATH)
IF( ${REAL_PYTHON_INCLUDE} MATCHES Python.framework)
message("Trying to find extra headers for numpy from ${REAL_PYTHON_INCLUDE}.")
message("Looking in ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy")
FIND_PATH(NUMPY_INCLUDE_DIR arrayobject.h
${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy
${REAL_PYTHON_INCLUDE}/numpy
)
IF(${NUMPY_INCLUDE_DIR} MATCHES NOTFOUND)
message("Unable to find numpy include directories: ${NUMPY_INCLUDE_DIR}")
ELSE()
message("Found headers at ${NUMPY_INCLUDE_DIR}")
INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR})
INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}/..)
ENDIF()
ENDIF()
ENDIF(APPLE)
if(MSVC)
add_library(${moduleName}_python MODULE ${ARGN})
set_target_properties(${moduleName}_python PROPERTIES
OUTPUT_NAME ${moduleName}_python
CLEAN_DIRECT_OUTPUT 1
VERSION 1
SOVERSION 0
SUFFIX ".pyd")
target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>)
message(${PYLIB_OUTPUT_FILE})
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd)
ELSE()
# Create a shared library
add_library(${moduleName}_python SHARED ${generated_cpp_file})
set_target_properties(${moduleName}_python PROPERTIES
OUTPUT_NAME ${moduleName}_python
CLEAN_DIRECT_OUTPUT 1)
target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
# On OSX and Linux, the python library must end in the extension .so. Build this
# filename here.
get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION)
set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>)
message(${PYLIB_OUTPUT_FILE})
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
set(PYLIB_SO_NAME lib${moduleName}_python.so)
ENDIF(MSVC)
# Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package
set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam)
# Cause the library to be output in the correct directory.
add_custom_command(TARGET ${moduleName}_python
POST_BUILD
COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
COMMENT "Copying library files to python directory" )
# Cause the library to be output in the correct directory.
add_custom_command(TARGET ${TARGET_NAME}
POST_BUILD
COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
COMMENT "Copying library files to python directory" )
get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES)
list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME})
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}")
endfunction(wrap_python)
# Macro to get list of subdirectories
macro(SUBDIRLIST result curdir)
file(GLOB children RELATIVE ${curdir} ${curdir}/*)
set(dirlist "")
foreach(child ${children})
if(IS_DIRECTORY ${curdir}/${child})
list(APPEND dirlist ${child})
endif()
endforeach()
set(${result} ${dirlist})
endmacro()


@ -47,9 +47,14 @@
# endif
# endif
#else
#ifdef __APPLE__
# define @library_name@_EXPORT __attribute__((visibility("default")))
# define @library_name@_EXTERN_EXPORT extern
#else
# define @library_name@_EXPORT
# define @library_name@_EXTERN_EXPORT extern
#endif
#endif
#undef BUILD_SHARED_LIBS


@ -1,57 +0,0 @@
# Install cython components
include(GtsamCythonWrap)
# Create the cython toolbox for the gtsam library
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
# Add the new make target command
set(python_install_target python-install)
add_custom_target(${python_install_target}
COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_CYTHON_INSTALL_PATH}/setup.py install
WORKING_DIRECTORY ${GTSAM_CYTHON_INSTALL_PATH})
# build and include the eigency version of eigency
add_subdirectory(gtsam_eigency)
include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
# Fix for error "C1128: number of sections exceeded object file format limit"
if(MSVC)
add_compile_options(/bigobj)
endif()
# First set up all the package related files.
# This also ensures the below wrap operations work correctly.
set(CYTHON_INSTALL_REQUIREMENTS_FILE "${PROJECT_SOURCE_DIR}/cython/requirements.txt")
# Install the custom-generated __init__.py
# This makes the cython (sub-)directories into python packages, so gtsam can be found while wrapping gtsam_unstable
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam/__init__.py COPYONLY)
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable/__init__.py COPYONLY)
configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${GTSAM_CYTHON_INSTALL_PATH}/setup.py)
# Wrap gtsam
add_custom_target(gtsam_header DEPENDS "../gtsam.h")
wrap_and_install_library_cython("../gtsam.h" # interface_header
"" # extra imports
"${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path
gtsam # library to link with
"wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
)
add_dependencies(${python_install_target} gtsam gtsam_header)
# Wrap gtsam_unstable
if(GTSAM_BUILD_UNSTABLE)
add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
"from gtsam.gtsam cimport *" # extra imports
"${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path
gtsam_unstable # library to link with
"gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
)
add_dependencies(${python_install_target} gtsam_unstable gtsam_unstable_header)
endif()
# install scripts and tests
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
endif ()

View File

@ -1,147 +0,0 @@
# Python Wrapper
This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code.
## Requirements
- If you want to build the GTSAM Python library for a specific Python version (e.g. 3.6),
  use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake`; otherwise the default interpreter will be used.
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment),
then the environment should be active while building GTSAM.
- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows:
```bash
pip install -r <gtsam_folder>/cython/requirements.txt
```
- For compatibility with GTSAM's Eigen version, the wrapper ships its own clone of [Eigency](https://github.com/wouterboomsma/eigency.git),
  named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy.
## Install
- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `<PROJECT_BINARY_DIR>/cython` in Release mode and `<PROJECT_BINARY_DIR>/cython<CMAKE_BUILD_TYPE>` for other modes.
- Build GTSAM and the wrapper with `make`.
- To install, simply run `make python-install`.
- The same command can be used to install into a virtual environment if it is active.
- **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory.
- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly.
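A quick sanity check after `make python-install` (a hedged sketch; it only assumes the wrapper was installed into the interpreter you run it with):

```python
import gtsam

# Where did the interpreter pick the package up from?
print(gtsam.__file__)

# Default-construct a wrapped class to exercise the bindings;
# wrapped objects support print(obj).
pose = gtsam.Pose3()
print(pose)
```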
## Unit Tests
The Cython toolbox also has a small set of unit tests located in the
test directory. To run them:
```bash
cd <GTSAM_CYTHON_INSTALL_PATH>
python -m unittest discover
```
## Utils
TODO
## Examples
TODO
## Writing Your Own Scripts
See the tests for examples.
### Some Important Notes:
- Vector/Matrix:
  - GTSAM expects double-precision floating point vectors and matrices.
    Hence, you should pass numpy matrices with `dtype=float`, or `float64`.
  - GTSAM also expects _column-major_ matrices, unlike numpy's default storage
    scheme. Hence, you should pass column-major matrices to GTSAM using the
    flag `order='F'`, and you always get column-major matrices back.
    For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed).
  - Passing row-major matrices or a different dtype, e.g. `int`, also works,
    as the wrapper converts them to column-major and dtype `float` for you,
    using `numpy.array.astype(float, order='F', copy=False)`.
    However, this results in a copy if your matrix is not already of the
    expected dtype and storage order. See the short sketch at the end of this section.
- Inner namespaces: classes in an inner namespace are prefixed by <innerNamespace>\_ in Python.
  Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey`
- Casting from a base class to a derived class must be done explicitly.
Examples:
```python
noiseBase = factor.noiseModel()
noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
```
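The following minimal sketch ties these notes together (hedged: it only assumes the wrapped `gtsam` module is importable and that `noiseModel_Isotropic` is wrapped under the naming convention above):

```python
import numpy as np
import gtsam

# GTSAM wants float64, column-major ('F') storage.
points = np.array([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]], dtype=float, order='F')

# Inner-namespace classes are flattened with an underscore prefix.
noise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)

# Row-major or integer input is accepted but converted (and copied) --
# this line mirrors what the wrapper does internally.
ints = np.arange(6).reshape(2, 3)  # C-order, dtype=int
converted = ints.astype(float, order='F', copy=False)
```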
## Wrapping Custom GTSAM-based Project
Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python).
## KNOWN ISSUES
- Doesn't work with python3 installed from homebrew
- size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
  - Guess: 64 vs 32 bit? distutils compiler flags?
- Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be cast to FastVector<Key>
  - Upgrading to 0.25 solves the problem
- Need a default constructor and a default copy constructor for almost every class... :(
- support these constructors by default and declare "delete" for special classes?
### TODO
- [ ] allow duplication of parents' functions in child classes. Not allowed for now due to conflicts in Cython.
- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in GTSAM?)
- [ ] inner namespaces ==> inner packages?
- [ ] Wrap fixed-size Matrices/Vectors?
### Completed/Cancelled:
- [x] Fix Python tests: don't use " import <package> \* ": Bad style!!! (18-03-17 19:50)
- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
- [x] Wrap unstable @done (18-03-17 15:30)
- [x] Unify cython/GTSAM.h and the original GTSAM.h @done (18-03-17 15:30)
- [x] 18-03-17: managed to unify the two versions by removing std container stubs from the matlab version, and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
- [x] 06-03-17: manage to remove the requirements for default and copy constructors
- [ ] 25-11-16: Tried to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython ignore Key as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>.
- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: `GTSAM::JointMarginal __pyx_t_1;` Users don't have to wrap this constructor, however.
- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
- [x] CMake install script @done (25-11-16 02:30)
- [ ] [REFACTOR] better name for uninstantiateClass: very vague!! @cancelled (25-11-16 02:30) -- lazy
- [ ] forward declaration? @cancelled (23-11-16 13:00) - nothing to do, seem to work?
- [x] wrap VariableIndex: why is it in inference? If need to, shouldn't have constructors to specific FactorGraphs @done (23-11-16 13:00)
- [x] Global functions @done (22-11-16 21:00)
- [x] [REFACTOR] typesEqual --> isSameSignature @done (22-11-16 21:00)
- [x] Proper overloads (constructors, static methods, methods) @done (20-11-16 21:00)
- [x] Allow overloading methods. The current solution is annoying!!! @done (20-11-16 21:00)
- [x] Casting from parent and grandparents @done (16-11-16 17:00)
- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
- [x] Support "print obj" @done (16-11-16 17:00)
- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00)
- [x] Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34)
- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
- [x] [Nice to have] parse typedef @done (16-09-13 17:19)
- [x] ctypedef at correct places @done (16-09-12 18:34)
- [x] expand template variable type in constructor/static methods? @done (16-09-12 18:34)
- [x] NonlinearOptimizer: copy constructor deleted!!! @done (16-09-13 17:20)
- [x] Value: no default constructor @done (16-09-13 17:20)
- [x] ctypedef PriorFactor[Vector] PriorFactorVector @done (16-09-19 12:25)
- [x] Delete duplicate methods in derived class @done (16-09-12 13:38)
- [x] Fix return properly @done (16-09-11 17:14)
- [x] handle pair @done (16-09-11 17:14)
- [x] Eigency: ambiguous call: A(const T&) A(const Vector& v) and Eigency A(Map[Vector]& v) @done (16-09-11 07:59)
- [x] Eigency: Constructor: ambiguous construct from Vector/Matrix @done (16-09-11 07:59)
- [x] Eigency: Fix method template of Vector/Matrix: template argument is [Vector] while argument is Map[Vector] @done (16-09-11 08:22)
- [x] Robust noise: copy assignment operator is deleted because of shared_ptr of the abstract Base class @done (16-09-10 09:05)
- [ ] Cython: Constructor: generate default constructor? (hack: if it's serializable?) @cancelled (16-09-13 17:20)
- [ ] Eigency: Map[] to Block @created(16-09-10 07:59) @cancelled (16-09-11 08:28)
- inference before symbolic/linear
- what's the purpose of "virtual" ??

View File

@ -1,26 +0,0 @@
from .gtsam import *
try:
import gtsam_unstable
def _deprecated_wrapper(item, name):
def wrapper(*args, **kwargs):
from warnings import warn
message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) +
'Please import it from gtsam_unstable.')
warn(message)
return item(*args, **kwargs)
return wrapper
for name in dir(gtsam_unstable):
if not name.startswith('__'):
item = getattr(gtsam_unstable, name)
if callable(item):
item = _deprecated_wrapper(item, name)
globals()[name] = item
except ImportError:
pass
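# Note: the callables re-exported from gtsam_unstable above are wrapped so that
# accessing them through `gtsam` emits a deprecation warning and then forwards
# the call to the gtsam_unstable implementation.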

View File

@ -1,153 +0,0 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
A script validating and demonstrating the ImuFactor inference.
Author: Frank Dellaert, Varun Agrawal
"""
from __future__ import print_function
import math
import gtsam
import matplotlib.pyplot as plt
import numpy as np
from gtsam import symbol_shorthand_B as B
from gtsam import symbol_shorthand_V as V
from gtsam import symbol_shorthand_X as X
from gtsam.utils.plot import plot_pose3
from mpl_toolkits.mplot3d import Axes3D
from PreintegrationExample import POSES_FIG, PreintegrationExample
BIAS_KEY = B(0)
np.set_printoptions(precision=3, suppress=True)
class ImuFactorExample(PreintegrationExample):
def __init__(self):
self.velocity = np.array([2, 0, 0])
self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)
self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
# Choose one of these twists to change scenario:
zero_twist = (np.zeros(3), np.zeros(3))
forward_twist = (np.zeros(3), self.velocity)
loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity)
sick_twist = (
np.array([math.radians(30), -math.radians(30), 0]), self.velocity)
accBias = np.array([-0.3, 0.1, 0.2])
gyroBias = np.array([0.1, 0.3, -0.1])
bias = gtsam.imuBias_ConstantBias(accBias, gyroBias)
dt = 1e-2
super(ImuFactorExample, self).__init__(sick_twist, bias, dt)
def addPrior(self, i, graph):
state = self.scenario.navState(i)
graph.push_back(gtsam.PriorFactorPose3(
X(i), state.pose(), self.priorNoise))
graph.push_back(gtsam.PriorFactorVector(
V(i), state.velocity(), self.velNoise))
def run(self):
graph = gtsam.NonlinearFactorGraph()
# initialize data structure for pre-integrated IMU measurements
pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)
T = 12
num_poses = T + 1 # assumes 1 factor per second
initial = gtsam.Values()
initial.insert(BIAS_KEY, self.actualBias)
for i in range(num_poses):
state_i = self.scenario.navState(float(i))
poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1)
pose = state_i.pose().compose(poseNoise)
velocity = state_i.velocity() + np.random.randn(3)*0.1
initial.insert(X(i), pose)
initial.insert(V(i), velocity)
# simulate the loop
i = 0 # state index
actual_state_i = self.scenario.navState(0)
for k, t in enumerate(np.arange(0, T, self.dt)):
# get measurements and add them to PIM
measuredOmega = self.runner.measuredAngularVelocity(t)
measuredAcc = self.runner.measuredSpecificForce(t)
pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)
poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1)
actual_state_i = gtsam.NavState(
actual_state_i.pose().compose(poseNoise),
actual_state_i.velocity() + np.random.randn(3)*0.1)
# Plot IMU many times
if k % 10 == 0:
self.plotImu(t, measuredOmega, measuredAcc)
# Plot every second
if k % int(1 / self.dt) == 0:
self.plotGroundTruthPose(t)
# create IMU factor every second
if (k + 1) % int(1 / self.dt) == 0:
factor = gtsam.ImuFactor(X(i), V(i), X(
i + 1), V(i + 1), BIAS_KEY, pim)
graph.push_back(factor)
if True:
print(factor)
print(pim.predict(actual_state_i, self.actualBias))
pim.resetIntegration()
actual_state_i = self.scenario.navState(t + self.dt)
i += 1
# add priors on beginning and end
self.addPrior(0, graph)
self.addPrior(num_poses - 1, graph)
# optimize using Levenberg-Marquardt optimization
params = gtsam.LevenbergMarquardtParams()
params.setVerbosityLM("SUMMARY")
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
# Calculate and print marginal covariances
marginals = gtsam.Marginals(graph, result)
print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY))
for i in range(num_poses):
print("Covariance on pose {}:\n{}\n".format(
i, marginals.marginalCovariance(X(i))))
print("Covariance on vel {}:\n{}\n".format(
i, marginals.marginalCovariance(V(i))))
# Plot resulting poses
i = 0
while result.exists(X(i)):
pose_i = result.atPose3(X(i))
plot_pose3(POSES_FIG, pose_i, 0.1)
i += 1
gtsam.utils.plot.set_axes_equal(POSES_FIG)
print(result.atimuBias_ConstantBias(BIAS_KEY))
plt.ioff()
plt.show()
if __name__ == '__main__':
ImuFactorExample().run()

View File

@ -1,42 +0,0 @@
include(GtsamCythonWrap)
# Copy eigency's sources to the build folder
# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
# and eigency's cython pxd headers can be found when cythonizing gtsam
file(COPY "." DESTINATION ".")
set(OUTPUT_DIR "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency")
set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
# This is to make the build/cython/gtsam_eigency folder a python package
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
# include eigency headers
include_directories(${EIGENCY_INCLUDE_DIR})
# Cythonize and build eigency
message(STATUS "Cythonize and build eigency")
# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
# a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions()
# in conversions_api.h correctly!
cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "")
# Include Eigen headers:
target_include_directories(cythonize_eigency_conversions PUBLIC
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
)
target_include_directories(cythonize_eigency_core PUBLIC
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
)
add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
add_custom_target(cythonize_eigency)
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
if(TARGET ${python_install_target})
add_dependencies(${python_install_target} cythonize_eigency)
endif()

View File

@ -1,20 +0,0 @@
Copyright (c) 2016 Wouter Boomsma
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,13 +0,0 @@
import os
import numpy as np
__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}"
def get_includes(include_eigen=True):
root = os.path.dirname(__file__)
parent = os.path.join(root, "..")
path = [root, parent, np.get_include()]
if include_eigen:
path.append(os.path.join(root, __eigen_dir__))
return path
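# Hedged usage sketch (hypothetical build script): gather the include
# directories needed to compile Cython extensions against the bundled Eigen
# headers, e.g.
#   from gtsam_eigency import get_includes
#   include_dirs = get_includes(include_eigen=True)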

View File

@ -1,62 +0,0 @@
cimport numpy as np
cdef api np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
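# Naming scheme: the *_C functions return a C-ordered (row-major) view and the
# *_F functions an F-ordered (column-major) view over the Eigen buffer, while
# the ndarray_copy_* variants return an owning copy instead of a view.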

View File

@ -1,327 +0,0 @@
cimport cython
import numpy as np
from numpy.lib.stride_tricks import as_strided
@cython.boundscheck(False)
cdef np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long row_stride, long col_stride):
cdef double[:,:] mem_view = <double[:rows,:cols]>data
dtype = 'double'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long row_stride, long col_stride):
cdef double[::1,:] mem_view = <double[:rows:1,:cols]>data
dtype = 'double'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long row_stride, long col_stride):
cdef double[:,:] mem_view = <double[:rows,:cols]>data
dtype = 'double'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long row_stride, long col_stride):
cdef double[::1,:] mem_view = <double[:rows:1,:cols]>data
dtype = 'double'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long row_stride, long col_stride):
cdef float[:,:] mem_view = <float[:rows,:cols]>data
dtype = 'float'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long row_stride, long col_stride):
cdef float[::1,:] mem_view = <float[:rows:1,:cols]>data
dtype = 'float'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long row_stride, long col_stride):
cdef float[:,:] mem_view = <float[:rows,:cols]>data
dtype = 'float'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long row_stride, long col_stride):
cdef float[::1,:] mem_view = <float[:rows:1,:cols]>data
dtype = 'float'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long row_stride, long col_stride):
cdef long[:,:] mem_view = <long[:rows,:cols]>data
dtype = 'int_'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long row_stride, long col_stride):
cdef long[::1,:] mem_view = <long[:rows:1,:cols]>data
dtype = 'int_'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long row_stride, long col_stride):
cdef long[:,:] mem_view = <long[:rows,:cols]>data
dtype = 'int_'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long row_stride, long col_stride):
cdef long[::1,:] mem_view = <long[:rows:1,:cols]>data
dtype = 'int_'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned long[:,:] mem_view = <unsigned long[:rows,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned long[::1,:] mem_view = <unsigned long[:rows:1,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned long[:,:] mem_view = <unsigned long[:rows,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned long[::1,:] mem_view = <unsigned long[:rows:1,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long row_stride, long col_stride):
cdef int[:,:] mem_view = <int[:rows,:cols]>data
dtype = 'int'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long row_stride, long col_stride):
cdef int[::1,:] mem_view = <int[:rows:1,:cols]>data
dtype = 'int'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long row_stride, long col_stride):
cdef int[:,:] mem_view = <int[:rows,:cols]>data
dtype = 'int'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long row_stride, long col_stride):
cdef int[::1,:] mem_view = <int[:rows:1,:cols]>data
dtype = 'int'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned int[:,:] mem_view = <unsigned int[:rows,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned int[::1,:] mem_view = <unsigned int[:rows:1,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned int[:,:] mem_view = <unsigned int[:rows,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned int[::1,:] mem_view = <unsigned int[:rows:1,:cols]>data
dtype = 'uint'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long row_stride, long col_stride):
cdef short[:,:] mem_view = <short[:rows,:cols]>data
dtype = 'short'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long row_stride, long col_stride):
cdef short[::1,:] mem_view = <short[:rows:1,:cols]>data
dtype = 'short'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long row_stride, long col_stride):
cdef short[:,:] mem_view = <short[:rows,:cols]>data
dtype = 'short'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long row_stride, long col_stride):
cdef short[::1,:] mem_view = <short[:rows:1,:cols]>data
dtype = 'short'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned short[:,:] mem_view = <unsigned short[:rows,:cols]>data
dtype = 'ushort'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned short[::1,:] mem_view = <unsigned short[:rows:1,:cols]>data
dtype = 'ushort'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned short[:,:] mem_view = <unsigned short[:rows,:cols]>data
dtype = 'ushort'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned short[::1,:] mem_view = <unsigned short[:rows:1,:cols]>data
dtype = 'ushort'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long row_stride, long col_stride):
cdef signed char[:,:] mem_view = <signed char[:rows,:cols]>data
dtype = 'int8'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long row_stride, long col_stride):
cdef signed char[::1,:] mem_view = <signed char[:rows:1,:cols]>data
dtype = 'int8'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long row_stride, long col_stride):
cdef signed char[:,:] mem_view = <signed char[:rows,:cols]>data
dtype = 'int8'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long row_stride, long col_stride):
cdef signed char[::1,:] mem_view = <signed char[:rows:1,:cols]>data
dtype = 'int8'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned char[:,:] mem_view = <unsigned char[:rows,:cols]>data
dtype = 'uint8'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned char[::1,:] mem_view = <unsigned char[:rows:1,:cols]>data
dtype = 'uint8'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned char[:,:] mem_view = <unsigned char[:rows,:cols]>data
dtype = 'uint8'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long row_stride, long col_stride):
cdef unsigned char[::1,:] mem_view = <unsigned char[:rows:1,:cols]>data
dtype = 'uint8'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex128_t[:,:] mem_view = <np.complex128_t[:rows,:cols]>data
dtype = 'complex128'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex128_t[::1,:] mem_view = <np.complex128_t[:rows:1,:cols]>data
dtype = 'complex128'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex128_t[:,:] mem_view = <np.complex128_t[:rows,:cols]>data
dtype = 'complex128'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex128_t[::1,:] mem_view = <np.complex128_t[:rows:1,:cols]>data
dtype = 'complex128'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex64_t[:,:] mem_view = <np.complex64_t[:rows,:cols]>data
dtype = 'complex64'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex64_t[::1,:] mem_view = <np.complex64_t[:rows:1,:cols]>data
dtype = 'complex64'
cdef int itemsize = np.dtype(dtype).itemsize
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
@cython.boundscheck(False)
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex64_t[:,:] mem_view = <np.complex64_t[:rows,:cols]>data
dtype = 'complex64'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
@cython.boundscheck(False)
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
cdef np.complex64_t[::1,:] mem_view = <np.complex64_t[:rows:1,:cols]>data
dtype = 'complex64'
cdef int itemsize = np.dtype(dtype).itemsize
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))

View File

@ -1,917 +0,0 @@
cimport cython
cimport numpy as np
ctypedef signed char schar;
ctypedef unsigned char uchar;
ctypedef fused dtype:
uchar
schar
short
int
long
float
double
ctypedef fused DenseType:
Matrix
Array
ctypedef fused Rows:
_1
_2
_3
_4
_5
_6
_7
_8
_9
_10
_11
_12
_13
_14
_15
_16
_17
_18
_19
_20
_21
_22
_23
_24
_25
_26
_27
_28
_29
_30
_31
_32
Dynamic
ctypedef Rows Cols
ctypedef Rows StrideOuter
ctypedef Rows StrideInner
ctypedef fused DenseTypeShort:
Vector1i
Vector2i
Vector3i
Vector4i
VectorXi
RowVector1i
RowVector2i
RowVector3i
RowVector4i
RowVectorXi
Matrix1i
Matrix2i
Matrix3i
Matrix4i
MatrixXi
Vector1f
Vector2f
Vector3f
Vector4f
VectorXf
RowVector1f
RowVector2f
RowVector3f
RowVector4f
RowVectorXf
Matrix1f
Matrix2f
Matrix3f
Matrix4f
MatrixXf
Vector1d
Vector2d
Vector3d
Vector4d
VectorXd
RowVector1d
RowVector2d
RowVector3d
RowVector4d
RowVectorXd
Matrix1d
Matrix2d
Matrix3d
Matrix4d
MatrixXd
Vector1cf
Vector2cf
Vector3cf
Vector4cf
VectorXcf
RowVector1cf
RowVector2cf
RowVector3cf
RowVector4cf
RowVectorXcf
Matrix1cf
Matrix2cf
Matrix3cf
Matrix4cf
MatrixXcf
Vector1cd
Vector2cd
Vector3cd
Vector4cd
VectorXcd
RowVector1cd
RowVector2cd
RowVector3cd
RowVector4cd
RowVectorXcd
Matrix1cd
Matrix2cd
Matrix3cd
Matrix4cd
MatrixXcd
Array22i
Array23i
Array24i
Array2Xi
Array32i
Array33i
Array34i
Array3Xi
Array42i
Array43i
Array44i
Array4Xi
ArrayX2i
ArrayX3i
ArrayX4i
ArrayXXi
Array2i
Array3i
Array4i
ArrayXi
Array22f
Array23f
Array24f
Array2Xf
Array32f
Array33f
Array34f
Array3Xf
Array42f
Array43f
Array44f
Array4Xf
ArrayX2f
ArrayX3f
ArrayX4f
ArrayXXf
Array2f
Array3f
Array4f
ArrayXf
Array22d
Array23d
Array24d
Array2Xd
Array32d
Array33d
Array34d
Array3Xd
Array42d
Array43d
Array44d
Array4Xd
ArrayX2d
ArrayX3d
ArrayX4d
ArrayXXd
Array2d
Array3d
Array4d
ArrayXd
Array22cf
Array23cf
Array24cf
Array2Xcf
Array32cf
Array33cf
Array34cf
Array3Xcf
Array42cf
Array43cf
Array44cf
Array4Xcf
ArrayX2cf
ArrayX3cf
ArrayX4cf
ArrayXXcf
Array2cf
Array3cf
Array4cf
ArrayXcf
Array22cd
Array23cd
Array24cd
Array2Xcd
Array32cd
Array33cd
Array34cd
Array3Xcd
Array42cd
Array43cd
Array44cd
Array4Xcd
ArrayX2cd
ArrayX3cd
ArrayX4cd
ArrayXXcd
Array2cd
Array3cd
Array4cd
ArrayXcd
ctypedef fused StorageOrder:
RowMajor
ColMajor
ctypedef fused MapOptions:
Aligned
Unaligned
cdef extern from "eigency_cpp.h" namespace "eigency":
cdef cppclass _1 "1":
pass
cdef cppclass _2 "2":
pass
cdef cppclass _3 "3":
pass
cdef cppclass _4 "4":
pass
cdef cppclass _5 "5":
pass
cdef cppclass _6 "6":
pass
cdef cppclass _7 "7":
pass
cdef cppclass _8 "8":
pass
cdef cppclass _9 "9":
pass
cdef cppclass _10 "10":
pass
cdef cppclass _11 "11":
pass
cdef cppclass _12 "12":
pass
cdef cppclass _13 "13":
pass
cdef cppclass _14 "14":
pass
cdef cppclass _15 "15":
pass
cdef cppclass _16 "16":
pass
cdef cppclass _17 "17":
pass
cdef cppclass _18 "18":
pass
cdef cppclass _19 "19":
pass
cdef cppclass _20 "20":
pass
cdef cppclass _21 "21":
pass
cdef cppclass _22 "22":
pass
cdef cppclass _23 "23":
pass
cdef cppclass _24 "24":
pass
cdef cppclass _25 "25":
pass
cdef cppclass _26 "26":
pass
cdef cppclass _27 "27":
pass
cdef cppclass _28 "28":
pass
cdef cppclass _29 "29":
pass
cdef cppclass _30 "30":
pass
cdef cppclass _31 "31":
pass
cdef cppclass _32 "32":
pass
cdef cppclass PlainObjectBase:
pass
cdef cppclass Map[DenseTypeShort](PlainObjectBase):
Map() except +
Map(np.ndarray array) except +
cdef cppclass FlattenedMap[DenseType, dtype, Rows, Cols]:
FlattenedMap() except +
FlattenedMap(np.ndarray array) except +
cdef cppclass FlattenedMapWithOrder "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder]:
FlattenedMapWithOrder() except +
FlattenedMapWithOrder(np.ndarray array) except +
cdef cppclass FlattenedMapWithStride "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder, MapOptions, StrideOuter, StrideInner]:
FlattenedMapWithStride() except +
FlattenedMapWithStride(np.ndarray array) except +
cdef np.ndarray ndarray_view(PlainObjectBase &)
cdef np.ndarray ndarray_copy(PlainObjectBase &)
cdef np.ndarray ndarray(PlainObjectBase &)
cdef extern from "eigency_cpp.h" namespace "Eigen":
cdef cppclass Dynamic:
pass
cdef cppclass RowMajor:
pass
cdef cppclass ColMajor:
pass
cdef cppclass Aligned:
pass
cdef cppclass Unaligned:
pass
cdef cppclass Matrix(PlainObjectBase):
pass
cdef cppclass Array(PlainObjectBase):
pass
cdef cppclass VectorXd(PlainObjectBase):
pass
cdef cppclass Vector1i(PlainObjectBase):
pass
cdef cppclass Vector2i(PlainObjectBase):
pass
cdef cppclass Vector3i(PlainObjectBase):
pass
cdef cppclass Vector4i(PlainObjectBase):
pass
cdef cppclass VectorXi(PlainObjectBase):
pass
cdef cppclass RowVector1i(PlainObjectBase):
pass
cdef cppclass RowVector2i(PlainObjectBase):
pass
cdef cppclass RowVector3i(PlainObjectBase):
pass
cdef cppclass RowVector4i(PlainObjectBase):
pass
cdef cppclass RowVectorXi(PlainObjectBase):
pass
cdef cppclass Matrix1i(PlainObjectBase):
pass
cdef cppclass Matrix2i(PlainObjectBase):
pass
cdef cppclass Matrix3i(PlainObjectBase):
pass
cdef cppclass Matrix4i(PlainObjectBase):
pass
cdef cppclass MatrixXi(PlainObjectBase):
pass
cdef cppclass Vector1f(PlainObjectBase):
pass
cdef cppclass Vector2f(PlainObjectBase):
pass
cdef cppclass Vector3f(PlainObjectBase):
pass
cdef cppclass Vector4f(PlainObjectBase):
pass
cdef cppclass VectorXf(PlainObjectBase):
pass
cdef cppclass RowVector1f(PlainObjectBase):
pass
cdef cppclass RowVector2f(PlainObjectBase):
pass
cdef cppclass RowVector3f(PlainObjectBase):
pass
cdef cppclass RowVector4f(PlainObjectBase):
pass
cdef cppclass RowVectorXf(PlainObjectBase):
pass
cdef cppclass Matrix1f(PlainObjectBase):
pass
cdef cppclass Matrix2f(PlainObjectBase):
pass
cdef cppclass Matrix3f(PlainObjectBase):
pass
cdef cppclass Matrix4f(PlainObjectBase):
pass
cdef cppclass MatrixXf(PlainObjectBase):
pass
cdef cppclass Vector1d(PlainObjectBase):
pass
cdef cppclass Vector2d(PlainObjectBase):
pass
cdef cppclass Vector3d(PlainObjectBase):
pass
cdef cppclass Vector4d(PlainObjectBase):
pass
cdef cppclass VectorXd(PlainObjectBase):
pass
cdef cppclass RowVector1d(PlainObjectBase):
pass
cdef cppclass RowVector2d(PlainObjectBase):
pass
cdef cppclass RowVector3d(PlainObjectBase):
pass
cdef cppclass RowVector4d(PlainObjectBase):
pass
cdef cppclass RowVectorXd(PlainObjectBase):
pass
cdef cppclass Matrix1d(PlainObjectBase):
pass
cdef cppclass Matrix2d(PlainObjectBase):
pass
cdef cppclass Matrix3d(PlainObjectBase):
pass
cdef cppclass Matrix4d(PlainObjectBase):
pass
cdef cppclass MatrixXd(PlainObjectBase):
pass
cdef cppclass Vector1cf(PlainObjectBase):
pass
cdef cppclass Vector2cf(PlainObjectBase):
pass
cdef cppclass Vector3cf(PlainObjectBase):
pass
cdef cppclass Vector4cf(PlainObjectBase):
pass
cdef cppclass VectorXcf(PlainObjectBase):
pass
cdef cppclass RowVector1cf(PlainObjectBase):
pass
cdef cppclass RowVector2cf(PlainObjectBase):
pass
cdef cppclass RowVector3cf(PlainObjectBase):
pass
cdef cppclass RowVector4cf(PlainObjectBase):
pass
cdef cppclass RowVectorXcf(PlainObjectBase):
pass
cdef cppclass Matrix1cf(PlainObjectBase):
pass
cdef cppclass Matrix2cf(PlainObjectBase):
pass
cdef cppclass Matrix3cf(PlainObjectBase):
pass
cdef cppclass Matrix4cf(PlainObjectBase):
pass
cdef cppclass MatrixXcf(PlainObjectBase):
pass
cdef cppclass Vector1cd(PlainObjectBase):
pass
cdef cppclass Vector2cd(PlainObjectBase):
pass
cdef cppclass Vector3cd(PlainObjectBase):
pass
cdef cppclass Vector4cd(PlainObjectBase):
pass
cdef cppclass VectorXcd(PlainObjectBase):
pass
cdef cppclass RowVector1cd(PlainObjectBase):
pass
cdef cppclass RowVector2cd(PlainObjectBase):
pass
cdef cppclass RowVector3cd(PlainObjectBase):
pass
cdef cppclass RowVector4cd(PlainObjectBase):
pass
cdef cppclass RowVectorXcd(PlainObjectBase):
pass
cdef cppclass Matrix1cd(PlainObjectBase):
pass
cdef cppclass Matrix2cd(PlainObjectBase):
pass
cdef cppclass Matrix3cd(PlainObjectBase):
pass
cdef cppclass Matrix4cd(PlainObjectBase):
pass
cdef cppclass MatrixXcd(PlainObjectBase):
pass
cdef cppclass Array22i(PlainObjectBase):
pass
cdef cppclass Array23i(PlainObjectBase):
pass
cdef cppclass Array24i(PlainObjectBase):
pass
cdef cppclass Array2Xi(PlainObjectBase):
pass
cdef cppclass Array32i(PlainObjectBase):
pass
cdef cppclass Array33i(PlainObjectBase):
pass
cdef cppclass Array34i(PlainObjectBase):
pass
cdef cppclass Array3Xi(PlainObjectBase):
pass
cdef cppclass Array42i(PlainObjectBase):
pass
cdef cppclass Array43i(PlainObjectBase):
pass
cdef cppclass Array44i(PlainObjectBase):
pass
cdef cppclass Array4Xi(PlainObjectBase):
pass
cdef cppclass ArrayX2i(PlainObjectBase):
pass
cdef cppclass ArrayX3i(PlainObjectBase):
pass
cdef cppclass ArrayX4i(PlainObjectBase):
pass
cdef cppclass ArrayXXi(PlainObjectBase):
pass
cdef cppclass Array2i(PlainObjectBase):
pass
cdef cppclass Array3i(PlainObjectBase):
pass
cdef cppclass Array4i(PlainObjectBase):
pass
cdef cppclass ArrayXi(PlainObjectBase):
pass
cdef cppclass Array22f(PlainObjectBase):
pass
cdef cppclass Array23f(PlainObjectBase):
pass
cdef cppclass Array24f(PlainObjectBase):
pass
cdef cppclass Array2Xf(PlainObjectBase):
pass
cdef cppclass Array32f(PlainObjectBase):
pass
cdef cppclass Array33f(PlainObjectBase):
pass
cdef cppclass Array34f(PlainObjectBase):
pass
cdef cppclass Array3Xf(PlainObjectBase):
pass
cdef cppclass Array42f(PlainObjectBase):
pass
cdef cppclass Array43f(PlainObjectBase):
pass
cdef cppclass Array44f(PlainObjectBase):
pass
cdef cppclass Array4Xf(PlainObjectBase):
pass
cdef cppclass ArrayX2f(PlainObjectBase):
pass
cdef cppclass ArrayX3f(PlainObjectBase):
pass
cdef cppclass ArrayX4f(PlainObjectBase):
pass
cdef cppclass ArrayXXf(PlainObjectBase):
pass
cdef cppclass Array2f(PlainObjectBase):
pass
cdef cppclass Array3f(PlainObjectBase):
pass
cdef cppclass Array4f(PlainObjectBase):
pass
cdef cppclass ArrayXf(PlainObjectBase):
pass
cdef cppclass Array22d(PlainObjectBase):
pass
cdef cppclass Array23d(PlainObjectBase):
pass
cdef cppclass Array24d(PlainObjectBase):
pass
cdef cppclass Array2Xd(PlainObjectBase):
pass
cdef cppclass Array32d(PlainObjectBase):
pass
cdef cppclass Array33d(PlainObjectBase):
pass
cdef cppclass Array34d(PlainObjectBase):
pass
cdef cppclass Array3Xd(PlainObjectBase):
pass
cdef cppclass Array42d(PlainObjectBase):
pass
cdef cppclass Array43d(PlainObjectBase):
pass
cdef cppclass Array44d(PlainObjectBase):
pass
cdef cppclass Array4Xd(PlainObjectBase):
pass
cdef cppclass ArrayX2d(PlainObjectBase):
pass
cdef cppclass ArrayX3d(PlainObjectBase):
pass
cdef cppclass ArrayX4d(PlainObjectBase):
pass
cdef cppclass ArrayXXd(PlainObjectBase):
pass
cdef cppclass Array2d(PlainObjectBase):
pass
cdef cppclass Array3d(PlainObjectBase):
pass
cdef cppclass Array4d(PlainObjectBase):
pass
cdef cppclass ArrayXd(PlainObjectBase):
pass
cdef cppclass Array22cf(PlainObjectBase):
pass
cdef cppclass Array23cf(PlainObjectBase):
pass
cdef cppclass Array24cf(PlainObjectBase):
pass
cdef cppclass Array2Xcf(PlainObjectBase):
pass
cdef cppclass Array32cf(PlainObjectBase):
pass
cdef cppclass Array33cf(PlainObjectBase):
pass
cdef cppclass Array34cf(PlainObjectBase):
pass
cdef cppclass Array3Xcf(PlainObjectBase):
pass
cdef cppclass Array42cf(PlainObjectBase):
pass
cdef cppclass Array43cf(PlainObjectBase):
pass
cdef cppclass Array44cf(PlainObjectBase):
pass
cdef cppclass Array4Xcf(PlainObjectBase):
pass
cdef cppclass ArrayX2cf(PlainObjectBase):
pass
cdef cppclass ArrayX3cf(PlainObjectBase):
pass
cdef cppclass ArrayX4cf(PlainObjectBase):
pass
cdef cppclass ArrayXXcf(PlainObjectBase):
pass
cdef cppclass Array2cf(PlainObjectBase):
pass
cdef cppclass Array3cf(PlainObjectBase):
pass
cdef cppclass Array4cf(PlainObjectBase):
pass
cdef cppclass ArrayXcf(PlainObjectBase):
pass
cdef cppclass Array22cd(PlainObjectBase):
pass
cdef cppclass Array23cd(PlainObjectBase):
pass
cdef cppclass Array24cd(PlainObjectBase):
pass
cdef cppclass Array2Xcd(PlainObjectBase):
pass
cdef cppclass Array32cd(PlainObjectBase):
pass
cdef cppclass Array33cd(PlainObjectBase):
pass
cdef cppclass Array34cd(PlainObjectBase):
pass
cdef cppclass Array3Xcd(PlainObjectBase):
pass
cdef cppclass Array42cd(PlainObjectBase):
pass
cdef cppclass Array43cd(PlainObjectBase):
pass
cdef cppclass Array44cd(PlainObjectBase):
pass
cdef cppclass Array4Xcd(PlainObjectBase):
pass
cdef cppclass ArrayX2cd(PlainObjectBase):
pass
cdef cppclass ArrayX3cd(PlainObjectBase):
pass
cdef cppclass ArrayX4cd(PlainObjectBase):
pass
cdef cppclass ArrayXXcd(PlainObjectBase):
pass
cdef cppclass Array2cd(PlainObjectBase):
pass
cdef cppclass Array3cd(PlainObjectBase):
pass
cdef cppclass Array4cd(PlainObjectBase):
pass
cdef cppclass ArrayXcd(PlainObjectBase):
pass

View File

@ -1 +0,0 @@

View File

@ -1,504 +0,0 @@
#include <Eigen/Dense>
#include <iostream>
#include <stdexcept>
#include <complex>
typedef ::std::complex< double > __pyx_t_double_complex;
typedef ::std::complex< float > __pyx_t_float_complex;
#include "conversions_api.h"
#ifndef EIGENCY_CPP
#define EIGENCY_CPP
namespace eigency {
template<typename Scalar>
inline PyArrayObject *_ndarray_view(Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0);
template<typename Scalar>
inline PyArrayObject *_ndarray_copy(const Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0);
// Strides:
// Eigen and numpy differ in their way of dealing with strides. Eigen has the concept of outer and
// inner strides, which depend on whether the array/matrix is row-major or column-major:
// Inner stride: denotes the offset between succeeding elements in each row (row-major) or column (column-major).
// Outer stride: denotes the offset between succeeding rows (row-major) or succeeding columns (column-major).
// In contrast, numpy's stride is simply a measure of how fast each dimension should be incremented.
// Consequently, a switch in numpy storage order from row-major to column-major involves a switch
// in strides, while it does not affect the stride in Eigen.
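// A concrete illustration (hypothetical shapes, assuming 8-byte doubles):
//   Eigen::Matrix<double, 3, 4, Eigen::RowMajor> A;  // outer_stride() == 4, inner_stride() == 1
//   // as a numpy array: shape (3, 4), byte strides (4*8, 1*8)
//   Eigen::Matrix<double, 3, 4, Eigen::ColMajor> B;  // outer_stride() == 3, inner_stride() == 1
//   // as a numpy array: shape (3, 4), byte strides (1*8, 3*8)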
template<>
inline PyArrayObject *_ndarray_view<double>(double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major) {
// Eigen row-major mode: row_stride=outer_stride, and col_stride=inner_stride
// If no stride is given, the row_stride is set to the number of columns.
return ndarray_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
} else {
// Eigen column-major mode: col_stride=outer_stride, and row_stride=inner_stride
// If no stride is given, the col_stride is set to the number of rows.
return ndarray_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
}
template<>
inline PyArrayObject *_ndarray_copy<double>(const double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<float>(float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<float>(const float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<long>(long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<long>(const long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<unsigned long>(unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<unsigned long>(const unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<int>(int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<int>(const int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<unsigned int>(unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<unsigned int>(const unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<short>(short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<short>(const short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<unsigned short>(unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<unsigned short>(const unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<signed char>(signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<signed char>(const signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<unsigned char>(unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<unsigned char>(const unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<std::complex<double> >(std::complex<double> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<std::complex<double> >(const std::complex<double> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_view<std::complex<float> >(std::complex<float> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template<>
inline PyArrayObject *_ndarray_copy<std::complex<float> >(const std::complex<float> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
if (is_row_major)
return ndarray_copy_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
else
return ndarray_copy_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
}
template <typename Derived>
inline PyArrayObject *ndarray(Eigen::PlainObjectBase<Derived> &m) {
import_gtsam_eigency__conversions();
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor);
}
// If C++11 is available, check if m is an r-value reference, in
// which case a copy should always be made
#if __cplusplus >= 201103L
template <typename Derived>
inline PyArrayObject *ndarray(Eigen::PlainObjectBase<Derived> &&m) {
import_gtsam_eigency__conversions();
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
}
#endif
template <typename Derived>
inline PyArrayObject *ndarray(const Eigen::PlainObjectBase<Derived> &m) {
import_gtsam_eigency__conversions();
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
}
template <typename Derived>
inline PyArrayObject *ndarray_view(Eigen::PlainObjectBase<Derived> &m) {
import_gtsam_eigency__conversions();
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor);
}
template <typename Derived>
inline PyArrayObject *ndarray_view(const Eigen::PlainObjectBase<Derived> &m) {
import_gtsam_eigency__conversions();
return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor);
}
template <typename Derived>
inline PyArrayObject *ndarray_copy(const Eigen::PlainObjectBase<Derived> &m) {
import_gtsam_eigency__conversions();
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
}
template <typename Derived, int MapOptions, typename Stride>
inline PyArrayObject *ndarray(Eigen::Map<Derived, MapOptions, Stride> &m) {
import_gtsam_eigency__conversions();
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
}
template <typename Derived, int MapOptions, typename Stride>
inline PyArrayObject *ndarray(const Eigen::Map<Derived, MapOptions, Stride> &m) {
import_gtsam_eigency__conversions();
// Since this is a map, we assume that ownership is correctly taken care
// of, and we avoid taking a copy
return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
}
template <typename Derived, int MapOptions, typename Stride>
inline PyArrayObject *ndarray_view(Eigen::Map<Derived, MapOptions, Stride> &m) {
import_gtsam_eigency__conversions();
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
}
template <typename Derived, int MapOptions, typename Stride>
inline PyArrayObject *ndarray_view(const Eigen::Map<Derived, MapOptions, Stride> &m) {
import_gtsam_eigency__conversions();
return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
}
template <typename Derived, int MapOptions, typename Stride>
inline PyArrayObject *ndarray_copy(const Eigen::Map<Derived, MapOptions, Stride> &m) {
import_gtsam_eigency__conversions();
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
}
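// A minimal usage sketch of the helpers above (illustrative; assumes the numpy
// C-API has been initialized by the importing module):
//   Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 3);
//   PyArrayObject* view = eigency::ndarray_view(m);  // shares m's storage
//   PyArrayObject* copy = eigency::ndarray_copy(m);  // owns an independent buffer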
template <typename MatrixType,
int _MapOptions = Eigen::Unaligned,
typename _StrideType=Eigen::Stride<0,0> >
class MapBase: public Eigen::Map<MatrixType, _MapOptions, _StrideType> {
public:
typedef Eigen::Map<MatrixType, _MapOptions, _StrideType> Base;
typedef typename Base::Scalar Scalar;
MapBase(Scalar* data,
long rows,
long cols,
_StrideType stride=_StrideType())
: Base(data,
// If both dimensions are dynamic or dimensions match, accept dimensions as they are
((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) ||
(Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols))
? rows
// otherwise, test if swapping them makes them fit
: ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows)
? cols
: rows),
((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) ||
(Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols))
? cols
: ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows)
? rows
: cols),
stride
) {}
MapBase &operator=(const MatrixType &other) {
Base::operator=(other);
return *this;
}
virtual ~MapBase() { }
};
template <template<class,int,int,int,int,int> class EigencyDenseBase,
typename Scalar,
int _Rows, int _Cols,
int _Options = Eigen::AutoAlign |
#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
// workaround a bug in at least gcc 3.4.6
// the innermost ?: ternary operator is misparsed. We write it slightly
// differently and this makes gcc 3.4.6 happy, but it's ugly.
// The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
// (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
#if EIGEN_VERSION_AT_LEAST(3,2,90)
: !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
#else
: !(_Cols==1 && _Rows!=1) ? Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
#endif
: ColMajor ),
#else
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
: (_Cols==1 && _Rows!=1) ? Eigen::ColMajor
// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
#if EIGEN_VERSION_AT_LEAST(3,2,90)
: EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
#else
: Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
#endif
#endif
int _MapOptions = Eigen::Unaligned,
int _StrideOuter=0, int _StrideInner=0,
int _MaxRows = _Rows,
int _MaxCols = _Cols>
class FlattenedMap: public MapBase<EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > {
public:
typedef MapBase<EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > Base;
FlattenedMap()
: Base(NULL, 0, 0),
object_(NULL) {}
FlattenedMap(Scalar *data, long rows, long cols, long outer_stride=0, long inner_stride=0)
: Base(data, rows, cols,
Eigen::Stride<_StrideOuter, _StrideInner>(outer_stride, inner_stride)),
object_(NULL) {
}
FlattenedMap(PyArrayObject *object)
: Base((Scalar *)((PyArrayObject*)object)->data,
// : Base(_from_numpy<Scalar>((PyArrayObject*)object),
(((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1,
(((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0],
Eigen::Stride<_StrideOuter, _StrideInner>(_StrideOuter != Eigen::Dynamic ? _StrideOuter : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1,
_StrideInner != Eigen::Dynamic ? _StrideInner : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0])),
object_(object) {
if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object))
throw std::invalid_argument("Numpy array must be in one contiguous segment to be transferred to an Eigen Map.");
Py_XINCREF(object_);
}
FlattenedMap &operator=(const FlattenedMap &other) {
if (other.object_) {
new (this) FlattenedMap(other.object_);
} else {
// Replace the memory that we point to (not a memory allocation)
new (this) FlattenedMap(const_cast<Scalar*>(other.data()),
other.rows(),
other.cols(),
other.outerStride(),
other.innerStride());
}
return *this;
}
operator Base() const {
return static_cast<Base>(*this);
}
operator Base&() const {
return static_cast<Base&>(*this);
}
operator EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>() const {
return EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>(static_cast<Base>(*this));
}
virtual ~FlattenedMap() {
Py_XDECREF(object_);
}
private:
PyArrayObject * const object_;
};
template <typename MatrixType>
class Map: public MapBase<MatrixType> {
public:
typedef MapBase<MatrixType> Base;
typedef typename MatrixType::Scalar Scalar;
enum {
RowsAtCompileTime = Base::Base::RowsAtCompileTime,
ColsAtCompileTime = Base::Base::ColsAtCompileTime
};
Map()
: Base(NULL,
(RowsAtCompileTime == Eigen::Dynamic) ? 0 : RowsAtCompileTime,
(ColsAtCompileTime == Eigen::Dynamic) ? 0 : ColsAtCompileTime),
object_(NULL) {
}
Map(Scalar *data, long rows, long cols)
: Base(data, rows, cols),
object_(NULL) {}
Map(PyArrayObject *object)
: Base((PyObject*)object == Py_None? NULL: (Scalar *)object->data,
// ROW: If array is in row-major order, transpose (see README)
(PyObject*)object == Py_None? 0 :
(!PyArray_IS_F_CONTIGUOUS(object)
? ((object->nd == 1)
? 1 // ROW: If 1D row-major numpy array, set to 1 (row vector)
: object->dimensions[1])
: object->dimensions[0]),
// COLUMN: If array is in row-major order: transpose (see README)
(PyObject*)object == Py_None? 0 :
(!PyArray_IS_F_CONTIGUOUS(object)
? object->dimensions[0]
: ((object->nd == 1)
? 1 // COLUMN: If 1D col-major numpy array, set to 1 (column vector)
: object->dimensions[1]))),
object_(object) {
if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object))
throw std::invalid_argument("Numpy array must be in one contiguous segment to be transferred to an Eigen Map.");
Py_XINCREF(object_);
}
Map &operator=(const Map &other) {
if (other.object_) {
new (this) Map(other.object_);
} else {
// Replace the memory that we point to (not a memory allocation)
new (this) Map(const_cast<Scalar*>(other.data()),
other.rows(),
other.cols());
}
return *this;
}
Map &operator=(const MatrixType &other) {
MapBase<MatrixType>::operator=(other);
return *this;
}
operator Base() const {
return static_cast<Base>(*this);
}
operator Base&() const {
return static_cast<Base&>(*this);
}
operator MatrixType() const {
return MatrixType(static_cast<Base>(*this));
}
virtual ~Map() {
Py_XDECREF(object_);
}
private:
PyArrayObject * const object_;
};
}
#endif


@ -1,3 +0,0 @@
Cython>=0.25.2
backports_abc>=0.5
numpy>=1.11.0


@ -1,6 +1,6 @@
VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109
VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809
VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403
EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063
VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862
VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738
EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0


@ -1,16 +1,16 @@
VERTEX_SE3:QUAT 8646911284551352320 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162
VERTEX_SE3:QUAT 8646911284551352321 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508
VERTEX_SE3:QUAT 8646911284551352322 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10
VERTEX_SE3:QUAT 8646911284551352323 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508
VERTEX_SE3:QUAT 8646911284551352324 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162
VERTEX_SE3:QUAT 8646911284551352325 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567
VERTEX_SE3:QUAT 8646911284551352326 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412
VERTEX_SE3:QUAT 8646911284551352327 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567
VERTEX_TRACKXYZ 7782220156096217088 10 10 10
VERTEX_TRACKXYZ 7782220156096217089 -10 10 10
VERTEX_TRACKXYZ 7782220156096217090 -10 -10 10
VERTEX_TRACKXYZ 7782220156096217091 10 -10 10
VERTEX_TRACKXYZ 7782220156096217092 10 10 -10
VERTEX_TRACKXYZ 7782220156096217093 -10 10 -10
VERTEX_TRACKXYZ 7782220156096217094 -10 -10 -10
VERTEX_TRACKXYZ 7782220156096217095 10 -10 -10
VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162
VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508
VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10
VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508
VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162
VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567
VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412
VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567
VERTEX_TRACKXYZ 0 10 10 10
VERTEX_TRACKXYZ 1 -10 10 10
VERTEX_TRACKXYZ 2 -10 -10 10
VERTEX_TRACKXYZ 3 10 -10 10
VERTEX_TRACKXYZ 4 10 10 -10
VERTEX_TRACKXYZ 5 -10 10 -10
VERTEX_TRACKXYZ 6 -10 -10 -10
VERTEX_TRACKXYZ 7 10 -10 -10


@ -287,7 +287,7 @@ int main(int argc, char* argv[]) {
new_values.insert(current_pose_key, gps_pose);
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
gps_pose.translation().print();
cout << gps_pose.translation();
printf("\n\n");
} else {
new_values.insert(current_pose_key, current_pose_global);


@ -0,0 +1,125 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file ShonanAveragingCLI.cpp
* @brief Run Shonan Rotation Averaging Algorithm on a file or example dataset
* @author Frank Dellaert
* @date August, 2020
*
* Example usage:
*
* Running without arguments will run on the tiny 3D example pose3example-grid
* ./ShonanAveragingCLI
*
* Read 2D dataset w10000 (in examples/data) and output to w10000-rotations.g2o
* ./ShonanAveragingCLI -d 2 -n w10000 -o w10000-rotations.g2o
*
* Read 3D dataset sphere2500.txt and output to shonan.g2o (default)
* ./ShonanAveragingCLI -i sphere2500.txt
*
*/
#include <gtsam/base/timing.h>
#include <gtsam/sfm/ShonanAveraging.h>
#include <gtsam/slam/InitializePose.h>
#include <gtsam/slam/dataset.h>
#include <boost/program_options.hpp>
using namespace std;
using namespace gtsam;
namespace po = boost::program_options;
/* ************************************************************************* */
int main(int argc, char* argv[]) {
string datasetName;
string inputFile;
string outputFile;
int d, seed;
po::options_description desc(
"Shonan Rotation Averaging CLI reads a *pose* graph, extracts the "
"rotation constraints, and runs the Shonan algorithm.");
desc.add_options()("help", "Print help message")(
"named_dataset,n",
po::value<string>(&datasetName)->default_value("pose3example-grid"),
"Find and read frome example dataset file")(
"input_file,i", po::value<string>(&inputFile)->default_value(""),
"Read pose constraints graph from the specified file")(
"output_file,o",
po::value<string>(&outputFile)->default_value("shonan.g2o"),
"Write solution to the specified file")(
"dimension,d", po::value<int>(&d)->default_value(3),
"Optimize over 2D or 3D rotations")(
"seed,s", po::value<int>(&seed)->default_value(42),
"Random seed for initial estimate");
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
po::notify(vm);
if (vm.count("help")) {
cout << desc << "\n";
return 1;
}
// Get input file
if (inputFile.empty()) {
if (datasetName.empty()) {
cout << "You must either specify a named dataset or an input file\n"
<< desc << endl;
return 1;
}
inputFile = findExampleDataFile(datasetName);
}
// Seed random number generator
static std::mt19937 rng(seed);
NonlinearFactorGraph::shared_ptr inputGraph;
Values::shared_ptr posesInFile;
Values poses;
if (d == 2) {
cout << "Running Shonan averaging for SO(2) on " << inputFile << endl;
ShonanAveraging2 shonan(inputFile);
auto initial = shonan.initializeRandomly(rng);
auto result = shonan.run(initial);
// Parse file again to set up translation problem, adding a prior
boost::tie(inputGraph, posesInFile) = load2D(inputFile);
auto priorModel = noiseModel::Unit::Create(3);
inputGraph->addPrior(0, posesInFile->at<Pose2>(0), priorModel);
cout << "recovering 2D translations" << endl;
auto poseGraph = initialize::buildPoseGraph<Pose2>(*inputGraph);
poses = initialize::computePoses<Pose2>(result.first, &poseGraph);
} else if (d == 3) {
cout << "Running Shonan averaging for SO(3) on " << inputFile << endl;
ShonanAveraging3 shonan(inputFile);
auto initial = shonan.initializeRandomly(rng);
auto result = shonan.run(initial);
// Parse file again to set up translation problem, adding a prior
boost::tie(inputGraph, posesInFile) = load3D(inputFile);
auto priorModel = noiseModel::Unit::Create(6);
inputGraph->addPrior(0, posesInFile->at<Pose3>(0), priorModel);
cout << "recovering 3D translations" << endl;
auto poseGraph = initialize::buildPoseGraph<Pose3>(*inputGraph);
poses = initialize::computePoses<Pose3>(result.first, &poseGraph);
} else {
cout << "Can only run SO(2) or SO(3) averaging\n" << desc << endl;
return 1;
}
cout << "Writing result to " << outputFile << endl;
writeG2o(NonlinearFactorGraph(), poses, outputFile);
return 0;
}
/* ************************************************************************* */


@ -1,7 +1,8 @@
# install CCOLAMD headers
# install CCOLAMD and SuiteSparse_config headers
install(FILES CCOLAMD/Include/ccolamd.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/CCOLAMD)
install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/SuiteSparse_config)
# install Eigen unless System Eigen is used
if(NOT GTSAM_USE_SYSTEM_EIGEN)
# Find plain .h files
file(GLOB_RECURSE eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/Eigen/*.h")

482
gtsam/3rdparty/Spectra/GenEigsBase.h vendored Normal file

@ -0,0 +1,482 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_BASE_H
#define GEN_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow, std::sqrt
#include <algorithm> // std::min, std::copy
#include <complex> // std::complex, std::conj, std::norm, std::abs
#include <stdexcept> // std::invalid_argument
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/DoubleShiftQR.h"
#include "LinAlg/UpperHessenbergEigen.h"
#include "LinAlg/Arnoldi.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This is the base class for general eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as GenEigsSolver and GenEigsRealShiftSolver.
///
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class GenEigsBase
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
typedef Eigen::Array<bool, Eigen::Dynamic, 1> BoolArray;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Vector> MapConstVec;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
typedef ArnoldiOp<Scalar, OpType, BOpType> ArnoldiOpType;
typedef Arnoldi<Scalar, ArnoldiOpType> ArnoldiFac;
protected:
// clang-format off
OpType* m_op; // object to conduct matrix operation,
// e.g. matrix-vector product
const Index m_n; // dimension of matrix A
const Index m_nev; // number of eigenvalues requested
const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method
Index m_nmatop; // number of matrix operations called
Index m_niter; // number of restarting iterations
ArnoldiFac m_fac; // Arnoldi factorization
ComplexVector m_ritz_val; // Ritz values
ComplexMatrix m_ritz_vec; // Ritz vectors
ComplexVector m_ritz_est; // last row of m_ritz_vec
private:
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
int m_info; // status of the computation
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
const Scalar m_eps23; // m_eps^(2/3), used to test the convergence
// clang-format on
// Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part
// Complex Ritz values have exact conjugate pairs
// So we use exact tests here
static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); }
static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); }
// Implicitly restarted Arnoldi factorization
void restart(Index k)
{
using std::norm;
if (k >= m_ncv)
return;
DoubleShiftQR<Scalar> decomp_ds(m_ncv);
UpperHessenbergQR<Scalar> decomp_hb(m_ncv);
Matrix Q = Matrix::Identity(m_ncv, m_ncv);
for (Index i = k; i < m_ncv; i++)
{
if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1]))
{
// H - mu * I = Q1 * R1
// H <- R1 * Q1 + mu * I = Q1' * H * Q1
// H - conj(mu) * I = Q2 * R2
// H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2
//
// (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R
const Scalar s = Scalar(2) * m_ritz_val[i].real();
const Scalar t = norm(m_ritz_val[i]);
decomp_ds.compute(m_fac.matrix_H(), s, t);
// Q -> Q * Qi
decomp_ds.apply_YQ(Q);
// H -> Q'HQ
// Matrix Q = Matrix::Identity(m_ncv, m_ncv);
// decomp_ds.apply_YQ(Q);
// m_fac_H = Q.transpose() * m_fac_H * Q;
m_fac.compress_H(decomp_ds);
i++;
}
else
{
// QR decomposition of H - mu * I, mu is real
decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real());
// Q -> Q * Qi
decomp_hb.apply_YQ(Q);
// H -> Q'HQ = RQ + mu * I
m_fac.compress_H(decomp_hb);
}
}
m_fac.compress_V(Q);
m_fac.factorize_from(k, m_ncv, m_nmatop);
retrieve_ritzpair();
}
// Calculates the number of converged Ritz values
Index num_converged(Scalar tol)
{
// thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23);
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
// Converged "wanted" Ritz values
m_ritz_conv = (resid < thresh);
return m_ritz_conv.cast<Index>().sum();
}
// Returns the adjusted nev for restarting
Index nev_adjusted(Index nconv)
{
using std::abs;
Index nev_new = m_nev;
for (Index i = m_nev; i < m_ncv; i++)
if (abs(m_ritz_est[i]) < m_near_0)
nev_new++;
// Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK
nev_new += std::min(nconv, (m_ncv - nev_new) / 2);
if (nev_new == 1 && m_ncv >= 6)
nev_new = m_ncv / 2;
else if (nev_new == 1 && m_ncv > 3)
nev_new = 2;
if (nev_new > m_ncv - 2)
nev_new = m_ncv - 2;
// Increase nev by one if ritz_val[nev - 1] and
// ritz_val[nev] are conjugate pairs
if (is_complex(m_ritz_val[nev_new - 1]) &&
is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new]))
{
nev_new++;
}
return nev_new;
}
// Retrieves and sorts Ritz values and Ritz vectors
void retrieve_ritzpair()
{
UpperHessenbergEigen<Scalar> decomp(m_fac.matrix_H());
const ComplexVector& evals = decomp.eigenvalues();
ComplexMatrix evecs = decomp.eigenvectors();
SortEigenvalue<Complex, SelectionRule> sorting(evals.data(), evals.size());
std::vector<int> ind = sorting.index();
// Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
for (Index i = 0; i < m_ncv; i++)
{
m_ritz_val[i] = evals[ind[i]];
m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
}
for (Index i = 0; i < m_nev; i++)
{
m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
}
}
protected:
// Sorts the first nev Ritz pairs in the specified order
// This is used to return the final results
virtual void sort_ritzpair(int sort_rule)
{
// First make sure that we have a valid index vector
SortEigenvalue<Complex, LARGEST_MAGN> sorting(m_ritz_val.data(), m_nev);
std::vector<int> ind = sorting.index();
switch (sort_rule)
{
case LARGEST_MAGN:
break;
case LARGEST_REAL:
{
SortEigenvalue<Complex, LARGEST_REAL> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case LARGEST_IMAG:
{
SortEigenvalue<Complex, LARGEST_IMAG> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_MAGN:
{
SortEigenvalue<Complex, SMALLEST_MAGN> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_REAL:
{
SortEigenvalue<Complex, SMALLEST_REAL> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_IMAG:
{
SortEigenvalue<Complex, SMALLEST_IMAG> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
default:
throw std::invalid_argument("unsupported sorting rule");
}
ComplexVector new_ritz_val(m_ncv);
ComplexMatrix new_ritz_vec(m_ncv, m_nev);
BoolArray new_ritz_conv(m_nev);
for (Index i = 0; i < m_nev; i++)
{
new_ritz_val[i] = m_ritz_val[ind[i]];
new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
new_ritz_conv[i] = m_ritz_conv[ind[i]];
}
m_ritz_val.swap(new_ritz_val);
m_ritz_vec.swap(new_ritz_vec);
m_ritz_conv.swap(new_ritz_conv);
}
public:
/// \cond
GenEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) :
m_op(op),
m_n(m_op->rows()),
m_nev(nev),
m_ncv(ncv > m_n ? m_n : ncv),
m_nmatop(0),
m_niter(0),
m_fac(ArnoldiOpType(op, Bop), m_ncv),
m_info(NOT_COMPUTED),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3))
{
if (nev < 1 || nev > m_n - 2)
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix");
if (ncv < nev + 2 || ncv > m_n)
throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix");
}
///
/// Virtual destructor
///
virtual ~GenEigsBase() {}
/// \endcond
///
/// Initializes the solver by providing an initial residual vector.
///
/// \param init_resid Pointer to the initial residual vector.
///
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
/// to find eigenvalues. This function allows the user to provide the initial
/// residual vector.
///
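/// A minimal sketch (assuming Scalar = double and a concrete solver object
/// `eigs`, e.g. a GenEigsSolver, whose matrix has `n` rows):
/// \code{.cpp}
/// Eigen::VectorXd r0 = Eigen::VectorXd::Ones(n);  // any nonzero starting vector
/// eigs.init(r0.data());
/// eigs.compute();
/// \endcode
///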
void init(const Scalar* init_resid)
{
// Reset all matrices/vectors to zero
m_ritz_val.resize(m_ncv);
m_ritz_vec.resize(m_ncv, m_nev);
m_ritz_est.resize(m_ncv);
m_ritz_conv.resize(m_nev);
m_ritz_val.setZero();
m_ritz_vec.setZero();
m_ritz_est.setZero();
m_ritz_conv.setZero();
m_nmatop = 0;
m_niter = 0;
// Initialize the Arnoldi factorization
MapConstVec v0(init_resid, m_n);
m_fac.init(v0, m_nmatop);
}
///
/// Initializes the solver by providing a random initial residual vector.
///
/// This overloaded function generates a random initial residual vector
/// (with a fixed random seed) for the algorithm. Elements in the vector
/// follow independent Uniform(-0.5, 0.5) distribution.
///
void init()
{
SimpleRandom<Scalar> rng(0);
Vector init_resid = rng.random_vec(m_n);
init(init_resid.data());
}
///
/// Conducts the major computation procedure.
///
/// \param maxit Maximum number of iterations allowed in the algorithm.
/// \param tol Precision parameter for the calculated eigenvalues.
/// \param sort_rule Rule to sort the eigenvalues and eigenvectors.
/// Supported values are
/// `Spectra::LARGEST_MAGN`, `Spectra::LARGEST_REAL`,
/// `Spectra::LARGEST_IMAG`, `Spectra::SMALLEST_MAGN`,
/// `Spectra::SMALLEST_REAL` and `Spectra::SMALLEST_IMAG`,
/// for example `LARGEST_MAGN` indicates that eigenvalues
/// with largest magnitude come first.
/// Note that this argument is only used to
/// **sort** the final result, and the **selection** rule
/// (e.g. selecting the largest or smallest eigenvalues in the
/// full spectrum) is specified by the template parameter
/// `SelectionRule` of GenEigsSolver.
///
/// \return Number of converged eigenvalues.
///
Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_MAGN)
{
// The m-step Arnoldi factorization
m_fac.factorize_from(1, m_ncv, m_nmatop);
retrieve_ritzpair();
// Restarting
Index i, nconv = 0, nev_adj;
for (i = 0; i < maxit; i++)
{
nconv = num_converged(tol);
if (nconv >= m_nev)
break;
nev_adj = nev_adjusted(nconv);
restart(nev_adj);
}
// Sorting results
sort_ritzpair(sort_rule);
m_niter += i + 1;
m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING;
return std::min(m_nev, nconv);
}
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Returns the number of iterations used in the computation.
///
Index num_iterations() const { return m_niter; }
///
/// Returns the number of matrix operations used in the computation.
///
Index num_operations() const { return m_nmatop; }
///
/// Returns the converged eigenvalues.
///
/// \return A complex-valued vector containing the eigenvalues.
/// Returned vector type will be `Eigen::Vector<std::complex<Scalar>, ...>`, depending on
/// the template parameter `Scalar` defined.
///
ComplexVector eigenvalues() const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
ComplexVector res(nconv);
if (!nconv)
return res;
Index j = 0;
for (Index i = 0; i < m_nev; i++)
{
if (m_ritz_conv[i])
{
res[j] = m_ritz_val[i];
j++;
}
}
return res;
}
///
/// Returns the eigenvectors associated with the converged eigenvalues.
///
/// \param nvec The number of eigenvectors to return.
///
/// \return A complex-valued matrix containing the eigenvectors.
/// Returned matrix type will be `Eigen::Matrix<std::complex<Scalar>, ...>`,
/// depending on the template parameter `Scalar` defined.
///
ComplexMatrix eigenvectors(Index nvec) const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
nvec = std::min(nvec, nconv);
ComplexMatrix res(m_n, nvec);
if (!nvec)
return res;
ComplexMatrix ritz_vec_conv(m_ncv, nvec);
Index j = 0;
for (Index i = 0; i < m_nev && j < nvec; i++)
{
if (m_ritz_conv[i])
{
ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
j++;
}
}
res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
return res;
}
///
/// Returns all converged eigenvectors.
///
ComplexMatrix eigenvectors() const
{
return eigenvectors(m_nev);
}
};
} // namespace Spectra
#endif // GEN_EIGS_BASE_H


@ -0,0 +1,157 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#define GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenComplexShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a complex shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the shifted-and-inverted eigenvalues.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the DenseGenComplexShiftSolve wrapper class, or define their
/// own that implements all the public member functions as in
/// DenseGenComplexShiftSolve.
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseGenComplexShiftSolve<double> >
class GenEigsComplexShiftSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
const Scalar m_sigmar;
const Scalar m_sigmai;
// First transform back the Ritz values, and then sort
void sort_ritzpair(int sort_rule)
{
using std::abs;
using std::sqrt;
using std::norm;
// The eigenvalues we get from the iteration are
//     nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma)))
// so the eigenvalues of the original problem are
//     lambda = sigmar + (1 \pm sqrt(1 - 4 * nu^2 * sigmai^2)) / (2 * nu)
// We need to pick the correct root
// Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj
// and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj
// where r is any shift value.
// We can use this identity to determine lambdaj
//
// op(v) computes Re(inv(A - r * I) * v) for any real v
// If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj),
// then op(vj) = op(a) + op(b) * i
// By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine
// which one is the correct root
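// Derivation of the closed form above (with u = lambda - sigmar):
// nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma)))
//    = (lambda - sigmar) / ((lambda - sigmar)^2 + sigmai^2),
// so nu * u^2 - u + nu * sigmai^2 = 0, and hence
// u = (1 \pm sqrt(1 - 4 * nu^2 * sigmai^2)) / (2 * nu).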
// Select a random shift value
SimpleRandom<Scalar> rng(0);
const Scalar shiftr = rng.random() * m_sigmar + rng.random();
const Complex shift = Complex(shiftr, Scalar(0));
this->m_op->set_shift(shiftr, Scalar(0));
// Calculate inv(A - r * I) * vj
Vector v_real(this->m_n), v_imag(this->m_n), OPv_real(this->m_n), OPv_imag(this->m_n);
const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
for (Index i = 0; i < this->m_nev; i++)
{
v_real.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).real();
v_imag.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).imag();
this->m_op->perform_op(v_real.data(), OPv_real.data());
this->m_op->perform_op(v_imag.data(), OPv_imag.data());
// Two roots computed from the quadratic equation
const Complex nu = this->m_ritz_val[i];
const Complex root_part1 = m_sigmar + Scalar(0.5) / nu;
const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu;
const Complex root1 = root_part1 + root_part2;
const Complex root2 = root_part1 - root_part2;
// Test roots
Scalar err1 = Scalar(0), err2 = Scalar(0);
for (int k = 0; k < this->m_n; k++)
{
const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift);
const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift);
const Complex OPv = Complex(OPv_real[k], OPv_imag[k]);
err1 += norm(OPv - rhs1);
err2 += norm(OPv - rhs2);
}
const Complex lambdaj = (err1 < err2) ? root1 : root2;
this->m_ritz_val[i] = lambdaj;
if (abs(Eigen::numext::imag(lambdaj)) > eps)
{
this->m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj);
i++;
}
else
{
this->m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0));
}
}
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
}
public:
///
/// Constructor to create an eigen solver object using the shift-and-invert mode.
///
/// \param op Pointer to the matrix operation object. This class should implement
/// the complex shift-solve operation of \f$A\f$: calculating
/// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. Users could either
/// create the object from the DenseGenComplexShiftSolve wrapper class, or
/// define their own that implements all the public member functions
/// as in DenseGenComplexShiftSolve.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
/// \param sigmar The real part of the shift.
/// \param sigmai The imaginary part of the shift.
///
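/// A minimal construction sketch (assuming an Eigen::MatrixXd `M`; the
/// selection rule, nev, ncv and shift values below are illustrative):
/// \code{.cpp}
/// DenseGenComplexShiftSolve<double> op(M);
/// GenEigsComplexShiftSolver<double, LARGEST_MAGN, DenseGenComplexShiftSolve<double> >
///     eigs(&op, 3, 6, 1.0, 0.5);  // nev = 3, ncv = 6, shift sigma = 1.0 + 0.5i
/// eigs.init();
/// eigs.compute();
/// \endcode
///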
GenEigsComplexShiftSolver(OpType* op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) :
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
m_sigmar(sigmar), m_sigmai(sigmai)
{
this->m_op->set_shift(m_sigmar, m_sigmai);
}
};
} // namespace Spectra
#endif // GEN_EIGS_COMPLEX_SHIFT_SOLVER_H


@ -0,0 +1,89 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_REAL_SHIFT_SOLVER_H
#define GEN_EIGS_REAL_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenRealShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a real shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the shifted-and-inverted eigenvalues.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenRealShiftSolve and
/// SparseGenRealShiftSolve, or define their
/// own that implements all the public member functions as in
/// DenseGenRealShiftSolve.
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseGenRealShiftSolve<double> >
class GenEigsRealShiftSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef std::complex<Scalar> Complex;
typedef Eigen::Array<Complex, Eigen::Dynamic, 1> ComplexArray;
const Scalar m_sigma;
// First transform back the Ritz values, and then sort
void sort_ritzpair(int sort_rule)
{
// The eigenvalues we get from the iteration are nu = 1 / (lambda - sigma),
// so the eigenvalues of the original problem are lambda = 1 / nu + sigma
ComplexArray ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma;
this->m_ritz_val.head(this->m_nev) = ritz_val_org;
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
}
public:
///
/// Constructor to create an eigen solver object using the shift-and-invert mode.
///
/// \param op Pointer to the matrix operation object. This class should implement
/// the shift-solve operation of \f$A\f$: calculating
/// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseGenRealShiftSolve, or
/// define their own that implements all the public member functions
/// as in DenseGenRealShiftSolve.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
/// \param sigma The real-valued shift.
///
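/// A minimal construction sketch (assuming an Eigen::MatrixXd `M`; nev, ncv and
/// the shift value below are illustrative):
/// \code{.cpp}
/// DenseGenRealShiftSolve<double> op(M);
/// GenEigsRealShiftSolver<double, LARGEST_MAGN, DenseGenRealShiftSolve<double> >
///     eigs(&op, 3, 6, 2.0);  // nev = 3, ncv = 6, shift sigma = 2.0
/// eigs.init();
/// eigs.compute();
/// \endcode
///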
GenEigsRealShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) :
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
m_sigma(sigma)
{
this->m_op->set_shift(m_sigma);
}
};
} // namespace Spectra
#endif // GEN_EIGS_REAL_SHIFT_SOLVER_H

158
gtsam/3rdparty/Spectra/GenEigsSolver.h vendored Normal file

@ -0,0 +1,158 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_SOLVER_H
#define GEN_EIGS_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix.
///
/// Most of the background information documented in the SymEigsSolver class
/// also applies to the GenEigsSolver class here, except that the eigenvalues
/// and eigenvectors of a general matrix can now be complex-valued.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenMatProd and
/// SparseGenMatProd, or define their
/// own that implements all the public member functions as in
/// DenseGenMatProd.
///
/// An example that illustrates the usage of GenEigsSolver is given below:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/GenEigsSolver.h>
/// // <Spectra/MatOp/DenseGenMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
///
/// // Construct matrix operation object using the wrapper class
/// DenseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest
/// // (in magnitude, or norm) three eigenvalues
/// GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd<double> > eigs(&op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if(eigs.info() == SUCCESSFUL)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And also an example for sparse matrices:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/GenEigsSolver.h>
/// #include <Spectra/MatOp/SparseGenMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal,
/// // and 3 on the above-main subdiagonal
/// const int n = 10;
/// Eigen::SparseMatrix<double> M(n, n);
/// M.reserve(Eigen::VectorXi::Constant(n, 3));
/// for(int i = 0; i < n; i++)
/// {
/// M.insert(i, i) = 1.0;
/// if(i > 0)
/// M.insert(i - 1, i) = 3.0;
/// if(i < n - 1)
/// M.insert(i + 1, i) = 2.0;
/// }
///
/// // Construct matrix operation object using the wrapper class SparseGenMatProd
/// SparseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// GenEigsSolver< double, LARGEST_MAGN, SparseGenMatProd<double> > eigs(&op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if(eigs.info() == SUCCESSFUL)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseGenMatProd<double> >
class GenEigsSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the matrix operation object, which should implement
/// the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseGenMatProd, or
/// define their own that implements all the public member functions
/// as in DenseGenMatProd.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
/// and it is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
///
GenEigsSolver(OpType* op, Index nev, Index ncv) :
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv)
{}
};
} // namespace Spectra
#endif // GEN_EIGS_SOLVER_H

284
gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h vendored Normal file
View File

@ -0,0 +1,284 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef ARNOLDI_H
#define ARNOLDI_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <stdexcept> // std::invalid_argument
#include <sstream> // std::stringstream
#include "../MatOp/internal/ArnoldiOp.h"
#include "../Util/TypeTraits.h"
#include "../Util/SimpleRandom.h"
#include "UpperHessenbergQR.h"
#include "DoubleShiftQR.h"
namespace Spectra {
// Arnoldi factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated with maximum dimension m, so the maximum value of k is m
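// Here the prime (') denotes transpose, and orthogonality is measured in the
// inner product supplied by ArnoldiOpType (the B-inner product; B = I for the
// standard eigenvalue problem): the factorization keeps V'BV = I and V'Bf = 0,
// and m_beta = ||f|| (in the B-norm) indicates how close span(V) is to an
// invariant subspace of A.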
template <typename Scalar, typename ArnoldiOpType>
class Arnoldi
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Matrix> MapConstMat;
typedef Eigen::Map<const Vector> MapConstVec;
protected:
// clang-format off
ArnoldiOpType m_op; // Operators for the Arnoldi factorization
const Index m_n; // dimension of A
const Index m_m; // maximum dimension of subspace V
Index m_k; // current dimension of subspace V
Matrix m_fac_V; // V matrix in the Arnoldi factorization
Matrix m_fac_H; // H matrix in the Arnoldi factorization
Vector m_fac_f; // residual in the Arnoldi factorization
Scalar m_beta; // ||f||, B-norm of f
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
// clang-format on
// Given an orthonormal basis V, find a nonzero vector f such that V'Bf = 0
// Assume that f has been properly allocated
void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm)
{
using std::sqrt;
const Scalar thresh = m_eps * sqrt(Scalar(m_n));
Vector Vf(V.cols());
for (Index iter = 0; iter < 5; iter++)
{
// Randomly generate a new vector and orthogonalize it against V
SimpleRandom<Scalar> rng(seed + 123 * iter);
f.noalias() = rng.random_vec(m_n);
// f <- f - V * V'Bf, so that f is orthogonal to V in B-norm
m_op.trans_product(V, f, Vf);
f.noalias() -= V * Vf;
// fnorm <- ||f||
fnorm = m_op.norm(f);
// If fnorm is too close to zero, we try a new random vector,
// otherwise return the result
if (fnorm >= thresh)
return;
}
}
public:
Arnoldi(const ArnoldiOpType& op, Index m) :
m_op(op), m_n(op.rows()), m_m(m), m_k(0),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon())
{}
virtual ~Arnoldi() {}
// Const-reference to internal structures
const Matrix& matrix_V() const { return m_fac_V; }
const Matrix& matrix_H() const { return m_fac_H; }
const Vector& vector_f() const { return m_fac_f; }
Scalar f_norm() const { return m_beta; }
Index subspace_dim() const { return m_k; }
// Initialize with an operator and an initial vector
void init(MapConstVec& v0, Index& op_counter)
{
m_fac_V.resize(m_n, m_m);
m_fac_H.resize(m_m, m_m);
m_fac_f.resize(m_n);
m_fac_H.setZero();
// Verify the initial vector
const Scalar v0norm = m_op.norm(v0);
if (v0norm < m_near_0)
throw std::invalid_argument("initial residual vector cannot be zero");
// Points to the first column of V
MapVec v(m_fac_V.data(), m_n);
// Normalize
v.noalias() = v0 / v0norm;
// Compute H and f
Vector w(m_n);
m_op.perform_op(v.data(), w.data());
op_counter++;
m_fac_H(0, 0) = m_op.inner_product(v, w);
m_fac_f.noalias() = w - v * m_fac_H(0, 0);
// In some cases f is zero in exact arithmetic, but due to rounding errors
// it may contain tiny fluctuations. When this happens, we force f to be zero
if (m_fac_f.cwiseAbs().maxCoeff() < m_eps)
{
m_fac_f.setZero();
m_beta = Scalar(0);
}
else
{
m_beta = m_op.norm(m_fac_f);
}
// Indicate that this is a step-1 factorization
m_k = 1;
}
// Arnoldi factorization starting from step-k
virtual void factorize_from(Index from_k, Index to_m, Index& op_counter)
{
using std::sqrt;
if (to_m <= from_k)
return;
if (from_k > m_k)
{
std::stringstream msg;
msg << "Arnoldi: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")";
throw std::invalid_argument(msg.str());
}
const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));
// Pre-allocate vectors
Vector Vf(to_m);
Vector w(m_n);
// Keep the upper-left k x k submatrix of H and set other elements to 0
m_fac_H.rightCols(m_m - from_k).setZero();
m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();
for (Index i = from_k; i <= to_m - 1; i++)
{
bool restart = false;
// If beta = 0, then the next V is not full rank
// We need to generate a new residual vector that is orthogonal
// to the current V, which we call a restart
if (m_beta < m_near_0)
{
MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns
expand_basis(V, 2 * i, m_fac_f, m_beta);
restart = true;
}
// v <- f / ||f||
m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column
// Note that H[i+1, i] equals the unrestarted beta
m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
// w <- A * v, v = m_fac_V.col(i)
m_op.perform_op(&m_fac_V(0, i), w.data());
op_counter++;
const Index i1 = i + 1;
// First i+1 columns of V
MapConstMat Vs(m_fac_V.data(), m_n, i1);
// h = m_fac_H(0:i, i)
MapVec h(&m_fac_H(0, i), i1);
// h <- V'Bw
m_op.trans_product(Vs, w, h);
// f <- w - V * h
m_fac_f.noalias() = w - Vs * h;
m_beta = m_op.norm(m_fac_f);
if (m_beta > Scalar(0.717) * m_op.norm(h))
continue;
// f/||f|| is going to be the next column of V, so we need to test
// whether V'B(f/||f||) ~= 0
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
// If not, iteratively correct the residual
int count = 0;
while (count < 5 && ortho_err > m_eps * m_beta)
{
// There is an edge case: when beta=||f|| is close to zero, f consists mostly
// of rounding-error noise, so the test [ortho_err < eps * beta] is very
// likely to fail. In particular, if beta=0, then the test is guaranteed to fail.
// Hence when this happens, we force f to be zero, and then restart in the
// next iteration.
if (m_beta < beta_thresh)
{
m_fac_f.setZero();
m_beta = Scalar(0);
break;
}
// f <- f - V * Vf
m_fac_f.noalias() -= Vs * Vf.head(i1);
// h <- h + Vf
h.noalias() += Vf.head(i1);
// beta <- ||f||
m_beta = m_op.norm(m_fac_f);
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
count++;
}
}
// Indicate that this is a step-m factorization
m_k = to_m;
}
// Apply H -> Q'HQ, where Q is from a double shift QR decomposition
void compress_H(const DoubleShiftQR<Scalar>& decomp)
{
decomp.matrix_QtHQ(m_fac_H);
m_k -= 2;
}
// Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition
void compress_H(const UpperHessenbergQR<Scalar>& decomp)
{
decomp.matrix_QtHQ(m_fac_H);
m_k--;
}
// Apply V -> VQ and compute the new f.
// Should be called after compress_H(), since m_k is updated there.
// Only need to update the first k+1 columns of V
// The first (m - k + i) elements of the i-th column of Q are non-zero,
// and the rest are zero
void compress_V(const Matrix& Q)
{
Matrix Vs(m_n, m_k + 1);
for (Index i = 0; i < m_k; i++)
{
const Index nnz = m_m - m_k + i + 1;
MapConstVec q(&Q(0, i), nnz);
Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q;
}
Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k);
m_fac_V.leftCols(m_k + 1).noalias() = Vs;
Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1);
m_fac_f.swap(fk);
m_beta = m_op.norm(m_fac_f);
}
};
} // namespace Spectra
#endif // ARNOLDI_H

530
gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h vendored Normal file
View File

@ -0,0 +1,530 @@
// Copyright (C) 2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef BK_LDLT_H
#define BK_LDLT_H
#include <Eigen/Core>
#include <vector>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
// Bunch-Kaufman LDLT decomposition
// References:
// 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems.
// Mathematics of computation, 31(137), 163-179.
// 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4.
// 3. Bunch-Parlett diagonal pivoting <http://oz.nthu.edu.tw/~d947207/Chap13_GE3.ppt>
// 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers.
// SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561.
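// The factorization computed below is P * (A - shift * I) * P' = L * D * L',
// where P is a permutation, L is unit lower triangular, and D is block diagonal
// with 1x1 and 2x2 blocks. L and the diagonal blocks of D (stored in inverted
// form, as consumed by solve_inplace()) share the packed lower-triangular
// storage m_data.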
template <typename Scalar = double>
class BKLDLT
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Matrix<Index, Eigen::Dynamic, 1> IntVector;
typedef Eigen::Ref<Vector> GenericVector;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef const Eigen::Ref<const Vector> ConstGenericVector;
Index m_n;
Vector m_data; // storage for a lower-triangular matrix
std::vector<Scalar*> m_colptr; // pointers to columns
IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5
std::vector<std::pair<Index, Index> > m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)]
bool m_computed;
int m_info;
// Access to elements
// Pointer to the k-th column
Scalar* col_pointer(Index k) { return m_colptr[k]; }
// A[i, j] -> m_colptr[j][i - j], i >= j
Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; }
const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; }
// A[i, i] -> m_colptr[i][0]
Scalar& diag_coeff(Index i) { return m_colptr[i][0]; }
const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; }
// Compute column pointers
void compute_pointer()
{
m_colptr.clear();
m_colptr.reserve(m_n);
Scalar* head = m_data.data();
for (Index i = 0; i < m_n; i++)
{
m_colptr.push_back(head);
head += (m_n - i);
}
}
// Copy mat - shift * I to m_data
void copy_data(ConstGenericMatrix& mat, int uplo, const Scalar& shift)
{
if (uplo == Eigen::Lower)
{
for (Index j = 0; j < m_n; j++)
{
const Scalar* begin = &mat.coeffRef(j, j);
const Index len = m_n - j;
std::copy(begin, begin + len, col_pointer(j));
diag_coeff(j) -= shift;
}
}
else
{
Scalar* dest = m_data.data();
for (Index i = 0; i < m_n; i++)
{
for (Index j = i; j < m_n; j++, dest++)
{
*dest = mat.coeff(i, j);
}
diag_coeff(i) -= shift;
}
}
}
// Compute compressed permutations
void compress_permutation()
{
for (Index i = 0; i < m_n; i++)
{
// Recover the permutation action
const Index perm = (m_perm[i] >= 0) ? (m_perm[i]) : (-m_perm[i] - 1);
if (perm != i)
m_permc.push_back(std::make_pair(i, perm));
}
}
// Working on the A[k:end, k:end] submatrix
// Exchange k <-> r
// Assume r >= k
void pivoting_1x1(Index k, Index r)
{
// No permutation
if (k == r)
{
m_perm[k] = r;
return;
}
// A[k, k] <-> A[r, r]
std::swap(diag_coeff(k), diag_coeff(r));
// A[(r+1):end, k] <-> A[(r+1):end, r]
std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r));
// A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)]
Scalar* src = &coeff(k + 1, k);
for (Index j = k + 1; j < r; j++, src++)
{
std::swap(*src, coeff(r, j));
}
m_perm[k] = r;
}
// Working on the A[k:end, k:end] submatrix
// Exchange [k+1, k] <-> [r, p]
// Assume p >= k, r >= k+1
void pivoting_2x2(Index k, Index r, Index p)
{
pivoting_1x1(k, p);
pivoting_1x1(k + 1, r);
// A[k+1, k] <-> A[r, k]
std::swap(coeff(k + 1, k), coeff(r, k));
// Use negative signs to indicate a 2x2 block
// Also minus one to distinguish a negative zero from a positive zero
m_perm[k] = -m_perm[k] - 1;
m_perm[k + 1] = -m_perm[k + 1] - 1;
}
// A[r1, c1:c2] <-> A[r2, c1:c2]
// Assume r2 >= r1 > c2 >= c1
void interchange_rows(Index r1, Index r2, Index c1, Index c2)
{
if (r1 == r2)
return;
for (Index j = c1; j <= c2; j++)
{
std::swap(coeff(r1, j), coeff(r2, j));
}
}
// lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|}
// Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix
// r is the row index
// Assume k < end
Scalar find_lambda(Index k, Index& r)
{
using std::abs;
const Scalar* head = col_pointer(k); // => A[k, k]
const Scalar* end = col_pointer(k + 1);
// Start with r=k+1, lambda=A[k+1, k]
r = k + 1;
Scalar lambda = abs(head[1]);
// Scan remaining elements
for (const Scalar* ptr = head + 2; ptr < end; ptr++)
{
const Scalar abs_elem = abs(*ptr);
if (lambda < abs_elem)
{
lambda = abs_elem;
r = k + (ptr - head);
}
}
return lambda;
}
// sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]}
// Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix
// p is the row index
// Assume k < r < end
Scalar find_sigma(Index k, Index r, Index& p)
{
using std::abs;
// First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda()
// If r == end, we skip this search
Scalar sigma = Scalar(-1);
if (r < m_n - 1)
sigma = find_lambda(r, p);
// Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1]
for (Index j = k; j < r; j++)
{
const Scalar abs_elem = abs(coeff(r, j));
if (sigma < abs_elem)
{
sigma = abs_elem;
p = j;
}
}
return sigma;
}
// Generate permutations and apply to A
// Return true if the resulting pivoting is 1x1, and false if 2x2
bool permutate_mat(Index k, const Scalar& alpha)
{
using std::abs;
Index r = k, p = k;
const Scalar lambda = find_lambda(k, r);
// If lambda=0, no need to interchange
if (lambda > Scalar(0))
{
const Scalar abs_akk = abs(diag_coeff(k));
// If |A[k, k]| >= alpha * lambda, no need to interchange
if (abs_akk < alpha * lambda)
{
const Scalar sigma = find_sigma(k, r, p);
// If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange
if (sigma * abs_akk < alpha * lambda * lambda)
{
if (abs_akk >= alpha * sigma)
{
// Permutation on A
pivoting_1x1(k, r);
// Permutation on L
interchange_rows(k, r, 0, k - 1);
return true;
}
else
{
// There are two versions of permutation here
// 1. A[k+1, k] <-> A[r, k]
// 2. A[k+1, k] <-> A[r, p], where p >= k and r >= k+1
//
// Version 1 and 2 are used by Ref[1] and Ref[2], respectively
// Version 1 implementation
p = k;
// Version 2 implementation
// [r, p] and [p, r] are symmetric, but we need to make sure
// p >= k and r >= k+1, so it is safe to always make r > p
// One exception is when min{r,p} == k+1, in which case we make
// r = k+1, so that only one permutation needs to be performed
/* const Index rp_min = std::min(r, p);
const Index rp_max = std::max(r, p);
if(rp_min == k + 1)
{
r = rp_min; p = rp_max;
} else {
r = rp_max; p = rp_min;
} */
// Right now we use Version 1 since it reduces the overhead of interchange
// Permutation on A
pivoting_2x2(k, r, p);
// Permutation on L
interchange_rows(k, p, 0, k - 1);
interchange_rows(k + 1, r, 0, k - 1);
return false;
}
}
}
}
return true;
}
// E = [e11, e12]
// [e21, e22]
// Overwrite E with inv(E)
void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const
{
// inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta
// [d21, d22]
const Scalar delta = e11 * e22 - e21 * e21;
std::swap(e11, e22);
e11 /= delta;
e22 /= delta;
e21 = -e21 / delta;
}
// Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE
int gaussian_elimination_1x1(Index k)
{
// D = 1 / A[k, k]
const Scalar akk = diag_coeff(k);
// Return NUMERICAL_ISSUE if not invertible
if (akk == Scalar(0))
return NUMERICAL_ISSUE;
diag_coeff(k) = Scalar(1) / akk;
// B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k]
Scalar* lptr = col_pointer(k) + 1;
const Index ldim = m_n - k - 1;
MapVec l(lptr, ldim);
for (Index j = 0; j < ldim; j++)
{
MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j);
}
// l /= A[k, k]
l /= akk;
return SUCCESSFUL;
}
// Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE
int gaussian_elimination_2x2(Index k)
{
// D = inv(E)
Scalar& e11 = diag_coeff(k);
Scalar& e21 = coeff(k + 1, k);
Scalar& e22 = diag_coeff(k + 1);
// Return NUMERICAL_ISSUE if not invertible
if (e11 * e22 - e21 * e21 == Scalar(0))
return NUMERICAL_ISSUE;
inverse_inplace_2x2(e11, e21, e22);
// X = l * inv(E), l := L[(k+2):end, k:(k+1)]
Scalar* l1ptr = &coeff(k + 2, k);
Scalar* l2ptr = &coeff(k + 2, k + 1);
const Index ldim = m_n - k - 2;
MapVec l1(l1ptr, ldim), l2(l2ptr, ldim);
Eigen::Matrix<Scalar, Eigen::Dynamic, 2> X(ldim, 2);
X.col(0).noalias() = l1 * e11 + l2 * e21;
X.col(1).noalias() = l1 * e21 + l2 * e22;
// B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end]
for (Index j = 0; j < ldim; j++)
{
MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]);
}
// l = X
l1.noalias() = X.col(0);
l2.noalias() = X.col(1);
return SUCCESSFUL;
}
public:
BKLDLT() :
m_n(0), m_computed(false), m_info(NOT_COMPUTED)
{}
// Factorize mat - shift * I
BKLDLT(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) :
m_n(mat.rows()), m_computed(false), m_info(NOT_COMPUTED)
{
compute(mat, uplo, shift);
}
void compute(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0))
{
using std::abs;
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("BKLDLT: matrix must be square");
m_perm.setLinSpaced(m_n, 0, m_n - 1);
m_permc.clear();
// Copy data
m_data.resize((m_n * (m_n + 1)) / 2);
compute_pointer();
copy_data(mat, uplo, shift);
const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0;
Index k = 0;
for (k = 0; k < m_n - 1; k++)
{
// 1. Interchange rows and columns of A, and save the result to m_perm
bool is_1x1 = permutate_mat(k, alpha);
// 2. Gaussian elimination
if (is_1x1)
{
m_info = gaussian_elimination_1x1(k);
}
else
{
m_info = gaussian_elimination_2x2(k);
k++;
}
// 3. Check status
if (m_info != SUCCESSFUL)
break;
}
// Invert the last 1x1 block if it exists
if (k == m_n - 1)
{
const Scalar akk = diag_coeff(k);
if (akk == Scalar(0))
m_info = NUMERICAL_ISSUE;
diag_coeff(k) = Scalar(1) / diag_coeff(k);
}
compress_permutation();
m_computed = true;
}
// Solve Ax=b
void solve_inplace(GenericVector b) const
{
if (!m_computed)
throw std::logic_error("BKLDLT: need to call compute() first");
// PAP' = LDL'
// 1. b -> Pb
Scalar* x = b.data();
MapVec res(x, m_n);
Index npermc = m_permc.size();
for (Index i = 0; i < npermc; i++)
{
std::swap(x[m_permc[i].first], x[m_permc[i].second]);
}
// 2. Lz = Pb
// If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2
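// (a negative last entry of m_perm means rows m_n-2 and m_n-1 form a 2x2 block
// of D with no rows of L below it, so forward substitution stops one column earlier)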
const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2);
for (Index i = 0; i <= end; i++)
{
const Index b1size = m_n - i - 1;
const Index b2size = b1size - 1;
if (m_perm[i] >= 0)
{
MapConstVec l(&coeff(i + 1, i), b1size);
res.segment(i + 1, b1size).noalias() -= l * x[i];
}
else
{
MapConstVec l1(&coeff(i + 2, i), b2size);
MapConstVec l2(&coeff(i + 2, i + 1), b2size);
res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]);
i++;
}
}
// 3. Dw = z
for (Index i = 0; i < m_n; i++)
{
const Scalar e11 = diag_coeff(i);
if (m_perm[i] >= 0)
{
x[i] *= e11;
}
else
{
const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1);
const Scalar wi = x[i] * e11 + x[i + 1] * e21;
x[i + 1] = x[i] * e21 + x[i + 1] * e22;
x[i] = wi;
i++;
}
}
// 4. L'y = w
// If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2
Index i = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2);
for (; i >= 0; i--)
{
const Index ldim = m_n - i - 1;
MapConstVec l(&coeff(i + 1, i), ldim);
x[i] -= res.segment(i + 1, ldim).dot(l);
if (m_perm[i] < 0)
{
MapConstVec l2(&coeff(i + 1, i - 1), ldim);
x[i - 1] -= res.segment(i + 1, ldim).dot(l2);
i--;
}
}
// 5. x = P'y
for (Index i = npermc - 1; i >= 0; i--)
{
std::swap(x[m_permc[i].first], x[m_permc[i].second]);
}
}
Vector solve(ConstGenericVector& b) const
{
Vector res = b;
solve_inplace(res);
return res;
}
int info() const { return m_info; }
};
} // namespace Spectra
#endif // BK_LDLT_H
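A minimal usage sketch for the BKLDLT class above: it factorizes a small symmetric matrix with a shift and solves the shifted system. The include prefix is an assumption (the header is vendored under gtsam/3rdparty/Spectra/LinAlg/, so the Spectra root is assumed to be on the include path).

#include <Eigen/Core>
#include <iostream>
#include <Spectra/LinAlg/BKLDLT.h>  // include prefix assumed

int main()
{
    // Random symmetric (possibly indefinite) matrix
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(6, 6);
    Eigen::MatrixXd A = M + M.transpose();

    // Factorize A - shift * I from its lower triangle
    const double shift = 0.5;
    Spectra::BKLDLT<double> decomp(A, Eigen::Lower, shift);

    if (decomp.info() == Spectra::SUCCESSFUL)
    {
        // Solve (A - shift * I) x = b and report the residual
        Eigen::VectorXd b = Eigen::VectorXd::Ones(6);
        Eigen::VectorXd x = decomp.solve(b);
        Eigen::MatrixXd As = A - shift * Eigen::MatrixXd::Identity(6, 6);
        std::cout << "residual norm = " << (As * x - b).norm() << std::endl;
    }
    return 0;
}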

384
gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h vendored Normal file
View File

@ -0,0 +1,384 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DOUBLE_SHIFT_QR_H
#define DOUBLE_SHIFT_QR_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <algorithm> // std::min, std::fill, std::copy
#include <cmath> // std::abs, std::sqrt, std::pow
#include <stdexcept> // std::invalid_argument, std::logic_error
#include "../Util/TypeTraits.h"
namespace Spectra {
template <typename Scalar = double>
class DoubleShiftQR
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, 3, Eigen::Dynamic> Matrix3X;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Array<unsigned char, Eigen::Dynamic, 1> IntArray;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
Index m_n; // Dimension of the matrix
Matrix m_mat_H; // A copy of the matrix to be factorized
Scalar m_shift_s; // Shift constant
Scalar m_shift_t; // Shift constant
Matrix3X m_ref_u; // Householder reflectors
IntArray m_ref_nr;     // How many rows each reflector affects
// 3 - A general reflector
// 2 - A Givens rotation
// 1 - An identity transformation
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision,
// e.g. ~= 1e-16 for the "double" type
const Scalar m_eps_rel;
const Scalar m_eps_abs;
bool m_computed; // Whether matrix has been factorized
void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind)
{
using std::abs;
Scalar* u = &m_ref_u.coeffRef(0, ind);
unsigned char* nr = m_ref_nr.data();
// In general case the reflector affects 3 rows
nr[ind] = 3;
Scalar x2x3 = Scalar(0);
// If x3 is zero, decrease nr by 1
if (abs(x3) < m_near_0)
{
// If x2 is also zero, nr will be 1, and we can exit this function
if (abs(x2) < m_near_0)
{
nr[ind] = 1;
return;
}
else
{
nr[ind] = 2;
}
x2x3 = abs(x2);
}
else
{
x2x3 = Eigen::numext::hypot(x2, x3);
}
// x1' = x1 - rho * ||x||
// rho = -sign(x1), if x1 == 0, we choose rho = 1
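// With this choice of sign, the reflector P = I - 2 * u * u' (u defined below)
// maps (x1, x2, x3) to (rho * ||x||, 0, 0), and x1 - rho * ||x|| is computed
// without cancellation.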
Scalar x1_new = x1 - ((x1 <= 0) - (x1 > 0)) * Eigen::numext::hypot(x1, x2x3);
Scalar x_norm = Eigen::numext::hypot(x1_new, x2x3);
// Double check the norm of new x
if (x_norm < m_near_0)
{
nr[ind] = 1;
return;
}
u[0] = x1_new / x_norm;
u[1] = x2 / x_norm;
u[2] = x3 / x_norm;
}
void compute_reflector(const Scalar* x, Index ind)
{
compute_reflector(x[0], x[1], x[2], ind);
}
// Update the block X = H(il:iu, il:iu)
void update_block(Index il, Index iu)
{
// Block size
const Index bsize = iu - il + 1;
// If block size == 1, there is no need to apply reflectors
if (bsize == 1)
{
m_ref_nr.coeffRef(il) = 1;
return;
}
const Scalar x00 = m_mat_H.coeff(il, il),
x01 = m_mat_H.coeff(il, il + 1),
x10 = m_mat_H.coeff(il + 1, il),
x11 = m_mat_H.coeff(il + 1, il + 1);
// m00 = x00 * (x00 - s) + x01 * x10 + t
const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t;
// m10 = x10 * (x00 + x11 - s)
const Scalar m10 = x10 * (x00 + x11 - m_shift_s);
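// (m00, m10) and, for larger blocks, m20 are the leading entries of the first
// column of M = X * X - s * X + t * I, where s and t play the role of the sum
// and product of the two shifts; zeroing this column with a reflector carries
// out one implicit Francis double-shift step in real arithmetic.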
// For block size == 2, do a Givens rotation on M = X * X - s * X + t * I
if (bsize == 2)
{
// This causes nr=2
compute_reflector(m00, m10, 0, il);
// Apply the reflector to X
apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il);
apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il);
m_ref_nr.coeffRef(il + 1) = 1;
return;
}
// For block size >=3, use the regular strategy
// m20 = x21 * x10
const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il);
compute_reflector(m00, m10, m20, il);
// Apply the first reflector
apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il);
apply_XP(m_mat_H.block(0, il, il + std::min(bsize, Index(4)), 3), m_n, il);
// Calculate the following reflectors
// If entering this loop, block size is at least 4.
for (Index i = 1; i < bsize - 2; i++)
{
compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i);
// Apply the reflector to X
apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i);
apply_XP(m_mat_H.block(0, il + i, il + std::min(bsize, Index(i + 4)), 3), m_n, il + i);
}
// The last reflector
// This causes nr=2
compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1);
// Apply the reflector to X
apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1);
apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1);
m_ref_nr.coeffRef(iu) = 1;
}
// P = I - 2 * u * u' = P'
// PX = X - 2 * u * (u'X)
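// In the overloads below, 'stride' is the outer stride (number of rows) of the
// full matrix that X is a block of, so advancing a raw column pointer by
// 'stride' moves to the same row range of the next column.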
void apply_PX(GenericMatrix X, Index stride, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind);
const Scalar u0_2 = Scalar(2) * u0,
u1_2 = Scalar(2) * u1;
const Index nrow = X.rows();
const Index ncol = X.cols();
Scalar* xptr = X.data();
if (nr == 2 || nrow == 2)
{
for (Index i = 0; i < ncol; i++, xptr += stride)
{
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1];
xptr[0] -= tmp * u0;
xptr[1] -= tmp * u1;
}
}
else
{
const Scalar u2 = m_ref_u.coeff(2, u_ind);
const Scalar u2_2 = Scalar(2) * u2;
for (Index i = 0; i < ncol; i++, xptr += stride)
{
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2];
xptr[0] -= tmp * u0;
xptr[1] -= tmp * u1;
xptr[2] -= tmp * u2;
}
}
}
// x is a pointer to a vector
// Px = x - 2 * dot(x, u) * u
void apply_PX(Scalar* x, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind),
u2 = m_ref_u.coeff(2, u_ind);
// When the reflector only contains two elements, u2 has been set to zero
const bool nr_is_2 = (nr == 2);
const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 0 : (x[2] * u2)));
x[0] -= dot2 * u0;
x[1] -= dot2 * u1;
if (!nr_is_2)
x[2] -= dot2 * u2;
}
// XP = X - 2 * (X * u) * u'
void apply_XP(GenericMatrix X, Index stride, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind);
const Scalar u0_2 = Scalar(2) * u0,
u1_2 = Scalar(2) * u1;
const int nrow = X.rows();
const int ncol = X.cols();
Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1)
if (nr == 2 || ncol == 2)
{
// tmp = 2 * u0 * X0 + 2 * u1 * X1
// X0 => X0 - u0 * tmp
// X1 => X1 - u1 * tmp
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
}
}
else
{
Scalar* X2 = X1 + stride; // X2 => X.col(2)
const Scalar u2 = m_ref_u.coeff(2, u_ind);
const Scalar u2_2 = Scalar(2) * u2;
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
X2[i] -= tmp * u2;
}
}
}
public:
DoubleShiftQR(Index size) :
m_n(size),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps_rel(m_eps),
m_eps_abs(m_near_0 * (m_n / m_eps)),
m_computed(false)
{}
DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) :
m_n(mat.rows()),
m_mat_H(m_n, m_n),
m_shift_s(s),
m_shift_t(t),
m_ref_u(3, m_n),
m_ref_nr(m_n),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps_rel(m_eps),
m_eps_abs(m_near_0 * (m_n / m_eps)),
m_computed(false)
{
compute(mat, s, t);
}
void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t)
{
using std::abs;
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("DoubleShiftQR: matrix must be square");
m_mat_H.resize(m_n, m_n);
m_shift_s = s;
m_shift_t = t;
m_ref_u.resize(3, m_n);
m_ref_nr.resize(m_n);
// Make a copy of mat
std::copy(mat.data(), mat.data() + mat.size(), m_mat_H.data());
// Obtain the indices of zero elements in the subdiagonal,
// so that H can be divided into several blocks
std::vector<int> zero_ind;
zero_ind.reserve(m_n - 1);
zero_ind.push_back(0);
Scalar* Hii = m_mat_H.data();
for (Index i = 0; i < m_n - 2; i++, Hii += (m_n + 1))
{
// Hii[1] => m_mat_H(i + 1, i)
const Scalar h = abs(Hii[1]);
if (h <= 0 || h <= m_eps_rel * (abs(Hii[0]) + abs(Hii[m_n + 1])))
{
Hii[1] = 0;
zero_ind.push_back(i + 1);
}
// Make sure m_mat_H is upper Hessenberg
// Zero the elements below m_mat_H(i + 1, i)
std::fill(Hii + 2, Hii + m_n - i, Scalar(0));
}
zero_ind.push_back(m_n);
for (std::vector<int>::size_type i = 0; i < zero_ind.size() - 1; i++)
{
const Index start = zero_ind[i];
const Index end = zero_ind[i + 1] - 1;
// Compute reflectors and update each block
update_block(start, end);
}
m_computed = true;
}
void matrix_QtHQ(Matrix& dest) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
dest.noalias() = m_mat_H;
}
// Q = P0 * P1 * ...
// Q'y = P_{n-2} * ... * P1 * P0 * y
void apply_QtY(Vector& y) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
Scalar* y_ptr = y.data();
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++, y_ptr++)
{
apply_PX(y_ptr, i);
}
}
// Q = P0 * P1 * ...
// YQ = Y * P0 * P1 * ...
void apply_YQ(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
const Index nrow = Y.rows();
const Index n2 = m_n - 2;
for (Index i = 0; i < n2; i++)
{
apply_XP(Y.block(0, i, nrow, 3), nrow, i);
}
apply_XP(Y.block(0, n2, nrow, 2), nrow, n2);
}
};
} // namespace Spectra
#endif // DOUBLE_SHIFT_QR_H

167
gtsam/3rdparty/Spectra/LinAlg/Lanczos.h vendored Normal file
View File

@ -0,0 +1,167 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef LANCZOS_H
#define LANCZOS_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <stdexcept> // std::invalid_argument
#include <sstream> // std::stringstream
#include "Arnoldi.h"
namespace Spectra {
// Lanczos factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated with maximum dimension m, so the maximum value of k is m
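// For a matrix A that is self-adjoint in the B-inner product, the Hessenberg
// matrix H of the Arnoldi factorization reduces to a symmetric tridiagonal
// matrix, so each new basis vector only needs to be orthogonalized against the
// previous two columns of V; the re-orthogonalization loop below guards
// against the loss of orthogonality caused by rounding errors.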
template <typename Scalar, typename ArnoldiOpType>
class Lanczos : public Arnoldi<Scalar, ArnoldiOpType>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Matrix> MapConstMat;
typedef Eigen::Map<const Vector> MapConstVec;
using Arnoldi<Scalar, ArnoldiOpType>::m_op;
using Arnoldi<Scalar, ArnoldiOpType>::m_n;
using Arnoldi<Scalar, ArnoldiOpType>::m_m;
using Arnoldi<Scalar, ArnoldiOpType>::m_k;
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_V;
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_H;
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_f;
using Arnoldi<Scalar, ArnoldiOpType>::m_beta;
using Arnoldi<Scalar, ArnoldiOpType>::m_near_0;
using Arnoldi<Scalar, ArnoldiOpType>::m_eps;
public:
Lanczos(const ArnoldiOpType& op, Index m) :
Arnoldi<Scalar, ArnoldiOpType>(op, m)
{}
// Lanczos factorization starting from step-k
void factorize_from(Index from_k, Index to_m, Index& op_counter)
{
using std::sqrt;
if (to_m <= from_k)
return;
if (from_k > m_k)
{
std::stringstream msg;
msg << "Lanczos: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")";
throw std::invalid_argument(msg.str());
}
const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));
// Pre-allocate vectors
Vector Vf(to_m);
Vector w(m_n);
// Keep the upper-left k x k submatrix of H and set other elements to 0
m_fac_H.rightCols(m_m - from_k).setZero();
m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();
for (Index i = from_k; i <= to_m - 1; i++)
{
bool restart = false;
// If beta = 0, then the next V is not full rank
// We need to generate a new residual vector that is orthogonal
// to the current V, which we call a restart
if (m_beta < m_near_0)
{
MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns
this->expand_basis(V, 2 * i, m_fac_f, m_beta);
restart = true;
}
// v <- f / ||f||
MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column
v.noalias() = m_fac_f / m_beta;
// Note that H[i+1, i] equals the unrestarted beta
m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
// w <- A * v
m_op.perform_op(v.data(), w.data());
op_counter++;
// H[i+1, i+1] = <v, w> = v'Bw
m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry
m_fac_H(i, i) = m_op.inner_product(v, w);
// f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1}
// If restarting, we know that H[i+1, i] = 0
if (restart)
m_fac_f.noalias() = w - m_fac_H(i, i) * v;
else
m_fac_f.noalias() = w - m_fac_H(i, i - 1) * m_fac_V.col(i - 1) - m_fac_H(i, i) * v;
m_beta = m_op.norm(m_fac_f);
// f/||f|| is going to be the next column of V, so we need to test
// whether V'B(f/||f||) ~= 0
const Index i1 = i + 1;
MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
// If not, iteratively correct the residual
int count = 0;
while (count < 5 && ortho_err > m_eps * m_beta)
{
// There is an edge case: when beta=||f|| is close to zero, f consists mostly
// of rounding-error noise, so the test [ortho_err < eps * beta] is very
// likely to fail. In particular, if beta=0, then the test is guaranteed to fail.
// Hence when this happens, we force f to be zero, and then restart in the
// next iteration.
if (m_beta < beta_thresh)
{
m_fac_f.setZero();
m_beta = Scalar(0);
break;
}
// f <- f - V * Vf
m_fac_f.noalias() -= Vs * Vf.head(i1);
// h <- h + Vf
m_fac_H(i - 1, i) += Vf[i - 1];
m_fac_H(i, i - 1) = m_fac_H(i - 1, i);
m_fac_H(i, i) += Vf[i];
// beta <- ||f||
m_beta = m_op.norm(m_fac_f);
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
count++;
}
}
// Indicate that this is a step-m factorization
m_k = to_m;
}
// Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition
void compress_H(const TridiagQR<Scalar>& decomp)
{
decomp.matrix_QtHQ(m_fac_H);
m_k--;
}
};
} // namespace Spectra
#endif // LANCZOS_H

219
gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h vendored Normal file
View File

@ -0,0 +1,219 @@
// The code was adapted from Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef TRIDIAG_EIGEN_H
#define TRIDIAG_EIGEN_H
#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <stdexcept>
#include "../Util/TypeTraits.h"
namespace Spectra {
template <typename Scalar = double>
class TridiagEigen
{
private:
typedef Eigen::Index Index;
// For convenience in adapting the tridiagonal_qr_step() function
typedef Scalar RealScalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
Index m_n;
Vector m_main_diag; // Main diagonal elements of the matrix
Vector m_sub_diag; // Sub-diagonal elements of the matrix
Matrix m_evecs; // To store eigenvectors
bool m_computed;
const Scalar m_near_0; // a very small value, ~= 1e-307 for the "double" type
// Adapted from Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
static void tridiagonal_qr_step(RealScalar* diag,
RealScalar* subdiag, Index start,
Index end, Scalar* matrixQ,
Index n)
{
using std::abs;
RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5);
RealScalar e = subdiag[end - 1];
// Note that, thanks to scaling, e^2 or td^2 cannot overflow; however, they can still
// underflow, thus leading to inf/NaN values when using the following commented code:
// RealScalar e2 = numext::abs2(subdiag[end-1]);
// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
// This explains the following, somewhat more complicated, version:
RealScalar mu = diag[end];
if (td == Scalar(0))
mu -= abs(e);
else
{
RealScalar e2 = Eigen::numext::abs2(subdiag[end - 1]);
RealScalar h = Eigen::numext::hypot(td, e);
if (e2 == RealScalar(0))
mu -= (e / (td + (td > RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
else
mu -= e2 / (td + (td > RealScalar(0) ? h : -h));
}
RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start];
Eigen::Map<Matrix> q(matrixQ, n, n);
for (Index k = start; k < end; ++k)
{
Eigen::JacobiRotation<RealScalar> rot;
rot.makeGivens(x, z);
const RealScalar s = rot.s();
const RealScalar c = rot.c();
// do T = G' T G
RealScalar sdk = s * diag[k] + c * subdiag[k];
RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1];
diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]);
diag[k + 1] = s * sdk + c * dkp1;
subdiag[k] = c * sdk - s * dkp1;
if (k > start)
subdiag[k - 1] = c * subdiag[k - 1] - s * z;
x = subdiag[k];
if (k < end - 1)
{
z = -s * subdiag[k + 1];
subdiag[k + 1] = c * subdiag[k + 1];
}
// apply the givens rotation to the unit matrix Q = Q * G
if (matrixQ)
q.applyOnTheRight(k, k + 1, rot);
}
}
public:
TridiagEigen() :
m_n(0), m_computed(false),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10))
{}
TridiagEigen(ConstGenericMatrix& mat) :
m_n(mat.rows()), m_computed(false),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10))
{
compute(mat);
}
void compute(ConstGenericMatrix& mat)
{
using std::abs;
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("TridiagEigen: matrix must be square");
m_main_diag.resize(m_n);
m_sub_diag.resize(m_n - 1);
m_evecs.resize(m_n, m_n);
m_evecs.setIdentity();
// Scale matrix to improve stability
const Scalar scale = std::max(mat.diagonal().cwiseAbs().maxCoeff(),
mat.diagonal(-1).cwiseAbs().maxCoeff());
// If scale=0, mat is a zero matrix, so we can early stop
if (scale < m_near_0)
{
// m_main_diag contains eigenvalues
m_main_diag.setZero();
// m_evecs has been set identity
// m_evecs.setIdentity();
m_computed = true;
return;
}
m_main_diag.noalias() = mat.diagonal() / scale;
m_sub_diag.noalias() = mat.diagonal(-1) / scale;
Scalar* diag = m_main_diag.data();
Scalar* subdiag = m_sub_diag.data();
Index end = m_n - 1;
Index start = 0;
Index iter = 0; // total number of iterations
int info = 0; // 0 for success, 1 for failure
const Scalar considerAsZero = TypeTraits<Scalar>::min();
const Scalar precision = Scalar(2) * Eigen::NumTraits<Scalar>::epsilon();
while (end > 0)
{
for (Index i = start; i < end; i++)
if (abs(subdiag[i]) <= considerAsZero ||
abs(subdiag[i]) <= (abs(diag[i]) + abs(diag[i + 1])) * precision)
subdiag[i] = 0;
// find the largest unreduced block
while (end > 0 && subdiag[end - 1] == Scalar(0))
end--;
if (end <= 0)
break;
// if we spent too many iterations, we give up
iter++;
if (iter > 30 * m_n)
{
info = 1;
break;
}
start = end - 1;
while (start > 0 && subdiag[start - 1] != Scalar(0))
start--;
tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n);
}
if (info > 0)
throw std::runtime_error("TridiagEigen: eigen decomposition failed");
// Scale eigenvalues back
m_main_diag *= scale;
m_computed = true;
}
const Vector& eigenvalues() const
{
if (!m_computed)
throw std::logic_error("TridiagEigen: need to call compute() first");
// After calling compute(), main_diag will contain the eigenvalues.
return m_main_diag;
}
const Matrix& eigenvectors() const
{
if (!m_computed)
throw std::logic_error("TridiagEigen: need to call compute() first");
return m_evecs;
}
};
} // namespace Spectra
#endif // TRIDIAG_EIGEN_H
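TridiagEigen is an internal helper that computes the full eigen decomposition of a symmetric tridiagonal matrix; only the main diagonal and the sub-diagonal of the input are read. A minimal sketch, under the same include-path assumption as the BKLDLT example above:

#include <Eigen/Core>
#include <iostream>
#include <Spectra/LinAlg/TridiagEigen.h>  // include prefix assumed

int main()
{
    // Symmetric tridiagonal matrix: 2 on the diagonal, -1 on the off-diagonals
    const int n = 5;
    Eigen::MatrixXd T = Eigen::MatrixXd::Zero(n, n);
    T.diagonal().setConstant(2.0);
    T.diagonal(1).setConstant(-1.0);
    T.diagonal(-1).setConstant(-1.0);

    Spectra::TridiagEigen<double> eig(T);
    // Eigenvalues (unsorted) and the matching eigenvectors (as columns)
    std::cout << eig.eigenvalues().transpose() << std::endl;
    std::cout << eig.eigenvectors() << std::endl;
    return 0;
}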

319
gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h vendored Normal file
View File

@ -0,0 +1,319 @@
// The code was adapted from Eigen/src/Eigenvalues/EigenSolver.h
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef UPPER_HESSENBERG_EIGEN_H
#define UPPER_HESSENBERG_EIGEN_H
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include <stdexcept>
namespace Spectra {
template <typename Scalar = double>
class UpperHessenbergEigen
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
Index m_n; // Size of the matrix
Eigen::RealSchur<Matrix> m_realSchur; // Schur decomposition solver
Matrix m_matT; // Schur T matrix
Matrix m_eivec; // Storing eigenvectors
ComplexVector m_eivalues; // Eigenvalues
bool m_computed;
void doComputeEigenvectors()
{
using std::abs;
const Index size = m_eivec.cols();
const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
// inefficient! this is already computed in RealSchur
Scalar norm(0);
for (Index j = 0; j < size; ++j)
{
norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum();
}
// Backsubstitute to find vectors of upper triangular form
if (norm == Scalar(0))
return;
for (Index n = size - 1; n >= 0; n--)
{
Scalar p = m_eivalues.coeff(n).real();
Scalar q = m_eivalues.coeff(n).imag();
// Scalar vector
if (q == Scalar(0))
{
Scalar lastr(0), lastw(0);
Index l = n;
m_matT.coeffRef(n, n) = Scalar(1);
for (Index i = n - 1; i >= 0; i--)
{
Scalar w = m_matT.coeff(i, i) - p;
Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1));
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastr = r;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == Scalar(0))
{
if (w != Scalar(0))
m_matT.coeffRef(i, n) = -r / w;
else
m_matT.coeffRef(i, n) = -r / (eps * norm);
}
else // Solve real equations
{
Scalar x = m_matT.coeff(i, i + 1);
Scalar y = m_matT.coeff(i + 1, i);
Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
Scalar t = (x * lastr - lastw * r) / denom;
m_matT.coeffRef(i, n) = t;
if (abs(x) > abs(lastw))
m_matT.coeffRef(i + 1, n) = (-r - w * t) / x;
else
m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw;
}
// Overflow control
Scalar t = abs(m_matT.coeff(i, n));
if ((eps * t) * t > Scalar(1))
m_matT.col(n).tail(size - i) /= t;
}
}
}
else if (q < Scalar(0) && n > 0)
{ // Complex vector
Scalar lastra(0), lastsa(0), lastw(0);
Index l = n - 1;
// Last vector component imaginary so matrix is triangular
if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n)))
{
m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1);
m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / m_matT.coeff(n, n - 1);
}
else
{
Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q);
m_matT.coeffRef(n - 1, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc);
}
m_matT.coeffRef(n, n - 1) = Scalar(0);
m_matT.coeffRef(n, n) = Scalar(1);
for (Index i = n - 2; i >= 0; i--)
{
Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1));
Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1));
Scalar w = m_matT.coeff(i, i) - p;
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastra = ra;
lastsa = sa;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == Scalar(0))
{
Complex cc = Complex(-ra, -sa) / Complex(w, q);
m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(i, n) = Eigen::numext::imag(cc);
}
else
{
// Solve complex equations
Scalar x = m_matT.coeff(i, i + 1);
Scalar y = m_matT.coeff(i + 1, i);
Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
if ((vr == Scalar(0)) && (vi == Scalar(0)))
vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi);
m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(i, n) = Eigen::numext::imag(cc);
if (abs(x) > (abs(lastw) + abs(q)))
{
m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x;
m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x;
}
else
{
cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q);
m_matT.coeffRef(i + 1, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc);
}
}
// Overflow control
Scalar t = std::max(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n)));
if ((eps * t) * t > Scalar(1))
m_matT.block(i, n - 1, size - i, 2) /= t;
}
}
// We handled a pair of complex conjugate eigenvalues, so need to skip them both
n--;
}
}
// Back transformation to get eigenvectors of original matrix
Vector m_tmp(size);
for (Index j = size - 1; j >= 0; j--)
{
m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1);
m_eivec.col(j) = m_tmp;
}
}
public:
UpperHessenbergEigen() :
m_n(0), m_computed(false)
{}
UpperHessenbergEigen(ConstGenericMatrix& mat) :
m_n(mat.rows()), m_computed(false)
{
compute(mat);
}
void compute(ConstGenericMatrix& mat)
{
using std::abs;
using std::sqrt;
if (mat.rows() != mat.cols())
throw std::invalid_argument("UpperHessenbergEigen: matrix must be square");
m_n = mat.rows();
// Scale matrix prior to the Schur decomposition
const Scalar scale = mat.cwiseAbs().maxCoeff();
// Reduce to real Schur form
Matrix Q = Matrix::Identity(m_n, m_n);
m_realSchur.computeFromHessenberg(mat / scale, Q, true);
if (m_realSchur.info() != Eigen::Success)
throw std::runtime_error("UpperHessenbergEigen: eigen decomposition failed");
m_matT = m_realSchur.matrixT();
m_eivec = m_realSchur.matrixU();
// Compute eigenvalues from matT
m_eivalues.resize(m_n);
Index i = 0;
while (i < m_n)
{
// Real eigenvalue
if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0))
{
m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
++i;
}
else // Complex eigenvalues
{
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1));
Scalar z;
// Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
// without overflow
{
Scalar t0 = m_matT.coeff(i + 1, i);
Scalar t1 = m_matT.coeff(i, i + 1);
Scalar maxval = std::max(abs(p), std::max(abs(t0), abs(t1)));
t0 /= maxval;
t1 /= maxval;
Scalar p0 = p / maxval;
z = maxval * sqrt(abs(p0 * p0 + t0 * t1));
}
m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z);
m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z);
i += 2;
}
}
// Compute eigenvectors
doComputeEigenvectors();
// Scale eigenvalues back
m_eivalues *= scale;
m_computed = true;
}
const ComplexVector& eigenvalues() const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergEigen: need to call compute() first");
return m_eivalues;
}
ComplexMatrix eigenvectors()
{
using std::abs;
if (!m_computed)
throw std::logic_error("UpperHessenbergEigen: need to call compute() first");
Index n = m_eivec.cols();
ComplexMatrix matV(n, n);
for (Index j = 0; j < n; ++j)
{
// imaginary part of real eigenvalue is already set to exact zero
if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n)
{
// we have a real eigenvalue
matV.col(j) = m_eivec.col(j).template cast<Complex>();
matV.col(j).normalize();
}
else
{
// we have a pair of complex eigenvalues
for (Index i = 0; i < n; ++i)
{
matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1));
matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1));
}
matV.col(j).normalize();
matV.col(j + 1).normalize();
++j;
}
}
return matV;
}
};
} // namespace Spectra
#endif // UPPER_HESSENBERG_EIGEN_H

670
gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h vendored Normal file
View File

@ -0,0 +1,670 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef UPPER_HESSENBERG_QR_H
#define UPPER_HESSENBERG_QR_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <algorithm> // std::fill, std::copy
#include <stdexcept> // std::logic_error
namespace Spectra {
///
/// \defgroup Internals Internal Classes
///
/// Classes for internal use. May be useful to developers.
///
///
/// \ingroup Internals
/// @{
///
///
/// \defgroup LinearAlgebra Linear Algebra
///
/// A number of classes for linear algebra operations.
///
///
/// \ingroup LinearAlgebra
///
/// Perform the QR decomposition of an upper Hessenberg matrix.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
///
template <typename Scalar = double>
class UpperHessenbergQR
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Matrix<Scalar, 1, Eigen::Dynamic> RowVector;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
Matrix m_mat_T;
protected:
Index m_n;
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
// Q = G1 * G2 * ... * G_{n-1}
Scalar m_shift;
Array m_rot_cos;
Array m_rot_sin;
bool m_computed;
// Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r
// If both x and y are zero, set c = 1 and s = 0
// We must implement it in a numerically stable way
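// With this convention, G' * [x, y]' = [r, 0]' for G = [c, s; -s, c], which is
// the rotation format used throughout this class.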
static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s)
{
using std::sqrt;
const Scalar xsign = (x > Scalar(0)) - (x < Scalar(0));
const Scalar ysign = (y > Scalar(0)) - (y < Scalar(0));
const Scalar xabs = x * xsign;
const Scalar yabs = y * ysign;
if (xabs > yabs)
{
// In this case xabs != 0
const Scalar ratio = yabs / xabs; // so that 0 <= ratio < 1
const Scalar common = sqrt(Scalar(1) + ratio * ratio);
c = xsign / common;
r = xabs * common;
s = -y / r;
}
else
{
if (yabs == Scalar(0))
{
r = Scalar(0);
c = Scalar(1);
s = Scalar(0);
return;
}
const Scalar ratio = xabs / yabs; // so that 0 <= ratio <= 1
const Scalar common = sqrt(Scalar(1) + ratio * ratio);
s = -ysign / common;
r = yabs * common;
c = x / r;
}
}
public:
///
/// Constructor to preallocate memory. Computation can
/// be performed later by calling the compute() method.
///
UpperHessenbergQR(Index size) :
m_n(size),
m_rot_cos(m_n - 1),
m_rot_sin(m_n - 1),
m_computed(false)
{}
///
/// Constructor to create an object that performs and stores the
/// QR decomposition of an upper Hessenberg matrix `mat`, with an
/// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix
/// `mat`, and \f$s\f$ is the shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the upper triangular and the lower subdiagonal parts of
/// the matrix are used.
///
UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) :
m_n(mat.rows()),
m_shift(shift),
m_rot_cos(m_n - 1),
m_rot_sin(m_n - 1),
m_computed(false)
{
compute(mat, shift);
}
///
/// Virtual destructor.
///
virtual ~UpperHessenbergQR(){};
///
/// Conduct the QR factorization of an upper Hessenberg matrix with
/// an optional shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the upper triangular and the lower subdiagonal parts of
/// the matrix are used.
///
virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0))
{
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("UpperHessenbergQR: matrix must be square");
m_shift = shift;
m_mat_T.resize(m_n, m_n);
m_rot_cos.resize(m_n - 1);
m_rot_sin.resize(m_n - 1);
// Make a copy of mat - s * I
std::copy(mat.data(), mat.data() + mat.size(), m_mat_T.data());
m_mat_T.diagonal().array() -= m_shift;
Scalar xi, xj, r, c, s;
Scalar *Tii, *ptr;
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
Tii = &m_mat_T.coeffRef(i, i);
// Make sure mat_T is upper Hessenberg
// Zero the elements below mat_T(i + 1, i)
std::fill(Tii + 2, Tii + m_n - i, Scalar(0));
xi = Tii[0]; // mat_T(i, i)
xj = Tii[1]; // mat_T(i + 1, i)
compute_rotation(xi, xj, r, c, s);
m_rot_cos[i] = c;
m_rot_sin[i] = s;
// For a complete QR decomposition,
// we first obtain the rotation matrix
// G = [ cos sin]
// [-sin cos]
// and then do T[i:(i + 1), i:(n - 1)] = G' * T[i:(i + 1), i:(n - 1)]
// Gt << c, -s, s, c;
// m_mat_T.block(i, i, 2, m_n - i) = Gt * m_mat_T.block(i, i, 2, m_n - i);
Tii[0] = r; // m_mat_T(i, i) => r
Tii[1] = 0; // m_mat_T(i + 1, i) => 0
ptr = Tii + m_n; // m_mat_T(i, k), k = i+1, i+2, ..., n-1
for (Index j = i + 1; j < m_n; j++, ptr += m_n)
{
Scalar tmp = ptr[0];
ptr[0] = c * tmp - s * ptr[1];
ptr[1] = s * tmp + c * ptr[1];
}
// If we do not need to calculate the R matrix, then
// only the cos and sin sequences are required.
// In such case we only update T[i + 1, (i + 1):(n - 1)]
// m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) *= c;
// m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) += s * mat_T.block(i, i + 1, 1, m_n - i - 1);
}
m_computed = true;
}
///
/// Return the \f$R\f$ matrix in the QR decomposition, which is an
/// upper triangular matrix.
///
/// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
virtual Matrix matrix_R() const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
return m_mat_T;
}
///
/// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`,
/// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix.
///
/// \param dest The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`,
/// depending on the template parameter `Scalar` defined.
///
virtual void matrix_QtHQ(Matrix& dest) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
// Make a copy of the R matrix
dest.resize(m_n, m_n);
std::copy(m_mat_T.data(), m_mat_T.data() + m_mat_T.size(), dest.data());
// Compute the RQ matrix
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Scalar *Yi, *Yi1;
Yi = &dest.coeffRef(0, i);
Yi1 = Yi + m_n; // RQ(0, i + 1)
const Index i2 = i + 2;
for (Index j = 0; j < i2; j++)
{
const Scalar tmp = Yi[j];
Yi[j] = c * tmp - s * Yi1[j];
Yi1[j] = s * tmp + c * Yi1[j];
}
/* Vector dest = RQ.block(0, i, i + 2, 1);
dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1);
dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */
}
// Add the shift to the diagonal
dest.diagonal().array() += m_shift;
}
///
/// Apply the \f$Q\f$ matrix to a vector \f$y\f$.
///
/// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$.
///
/// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
// Y -> QY = G1 * G2 * ... * Y
void apply_QY(Vector& Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
for (Index i = m_n - 2; i >= 0; i--)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1)] = Gi * Y[i:(i + 1)]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
const Scalar tmp = Y[i];
Y[i] = c * tmp + s * Y[i + 1];
Y[i + 1] = -s * tmp + c * Y[i + 1];
}
}
///
/// Apply the \f$Q\f$ matrix to a vector \f$y\f$.
///
/// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$.
///
/// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
// Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y
void apply_QtY(Vector& Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1)] = Gi' * Y[i:(i + 1)]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
const Scalar tmp = Y[i];
Y[i] = c * tmp - s * Y[i + 1];
Y[i + 1] = s * tmp + c * Y[i + 1];
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> QY = G1 * G2 * ... * Y
void apply_QY(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
RowVector Yi(Y.cols()), Yi1(Y.cols());
for (Index i = m_n - 2; i >= 0; i--)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.row(i);
Yi1.noalias() = Y.row(i + 1);
Y.row(i) = c * Yi + s * Yi1;
Y.row(i + 1) = -s * Yi + c * Yi1;
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y
void apply_QtY(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
RowVector Yi(Y.cols()), Yi1(Y.cols());
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.row(i);
Yi1.noalias() = Y.row(i + 1);
Y.row(i) = c * Yi - s * Yi1;
Y.row(i + 1) = s * Yi + c * Yi1;
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> YQ = Y * G1 * G2 * ...
void apply_YQ(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
/*Vector Yi(Y.rows());
for(Index i = 0; i < m_n - 1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.col(i);
Y.col(i) = c * Yi - s * Y.col(i + 1);
Y.col(i + 1) = s * Yi + c * Y.col(i + 1);
}*/
Scalar *Y_col_i, *Y_col_i1;
const Index n1 = m_n - 1;
const Index nrow = Y.rows();
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
Y_col_i = &Y.coeffRef(0, i);
Y_col_i1 = &Y.coeffRef(0, i + 1);
for (Index j = 0; j < nrow; j++)
{
Scalar tmp = Y_col_i[j];
Y_col_i[j] = c * tmp - s * Y_col_i1[j];
Y_col_i1[j] = s * tmp + c * Y_col_i1[j];
}
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1'
void apply_YQt(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
Vector Yi(Y.rows());
for (Index i = m_n - 2; i >= 0; i--)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi'
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.col(i);
Y.col(i) = c * Yi + s * Y.col(i + 1);
Y.col(i + 1) = -s * Yi + c * Y.col(i + 1);
}
}
};
///
/// \ingroup LinearAlgebra
///
/// Perform the QR decomposition of a tridiagonal matrix, a special
/// case of upper Hessenberg matrices.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
///
template <typename Scalar = double>
class TridiagQR : public UpperHessenbergQR<Scalar>
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef typename Matrix::Index Index;
Vector m_T_diag; // diagonal elements of T
Vector m_T_lsub; // lower subdiagonal of T
Vector m_T_usub; // upper subdiagonal of T
Vector m_T_usub2; // 2nd upper subdiagonal of T
public:
///
/// Constructor to preallocate memory. Computation can
/// be performed later by calling the compute() method.
///
TridiagQR(Index size) :
UpperHessenbergQR<Scalar>(size)
{}
///
/// Constructor to create an object that performs and stores the
/// QR decomposition of a tridiagonal matrix `mat`, with an
/// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix
/// `mat`, and \f$s\f$ is the shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the main diagonal and the subdiagonal of
/// the matrix are used.
///
TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) :
UpperHessenbergQR<Scalar>(mat.rows())
{
this->compute(mat, shift);
}
///
/// Conduct the QR factorization of a tridiagonal matrix with an
/// optional shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the main diagonal and the subdiagonal of
/// the matrix are used.
///
void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0))
{
this->m_n = mat.rows();
if (this->m_n != mat.cols())
throw std::invalid_argument("TridiagQR: matrix must be square");
this->m_shift = shift;
m_T_diag.resize(this->m_n);
m_T_lsub.resize(this->m_n - 1);
m_T_usub.resize(this->m_n - 1);
m_T_usub2.resize(this->m_n - 2);
this->m_rot_cos.resize(this->m_n - 1);
this->m_rot_sin.resize(this->m_n - 1);
m_T_diag.array() = mat.diagonal().array() - this->m_shift;
m_T_lsub.noalias() = mat.diagonal(-1);
m_T_usub.noalias() = m_T_lsub;
// A number of pointers to avoid repeated address calculation
Scalar *c = this->m_rot_cos.data(), // pointer to the cosine vector
*s = this->m_rot_sin.data(), // pointer to the sine vector
r;
const Index n1 = this->m_n - 1;
for (Index i = 0; i < n1; i++)
{
// diag[i] == T[i, i]
// lsub[i] == T[i + 1, i]
// r = sqrt(T[i, i]^2 + T[i + 1, i]^2)
// c = T[i, i] / r, s = -T[i + 1, i] / r
this->compute_rotation(m_T_diag.coeff(i), m_T_lsub.coeff(i), r, *c, *s);
// For a complete QR decomposition,
// we first obtain the rotation matrix
// G = [ cos sin]
// [-sin cos]
// and then do T[i:(i + 1), i:(i + 2)] = G' * T[i:(i + 1), i:(i + 2)]
// Update T[i, i] and T[i + 1, i]
// The updated value of T[i, i] is known to be r
// The updated value of T[i + 1, i] is known to be 0
m_T_diag.coeffRef(i) = r;
m_T_lsub.coeffRef(i) = Scalar(0);
// Update T[i, i + 1] and T[i + 1, i + 1]
// usub[i] == T[i, i + 1]
// diag[i + 1] == T[i + 1, i + 1]
const Scalar tmp = m_T_usub.coeff(i);
m_T_usub.coeffRef(i) = (*c) * tmp - (*s) * m_T_diag.coeff(i + 1);
m_T_diag.coeffRef(i + 1) = (*s) * tmp + (*c) * m_T_diag.coeff(i + 1);
// Update T[i, i + 2] and T[i + 1, i + 2]
// usub2[i] == T[i, i + 2]
// usub[i + 1] == T[i + 1, i + 2]
if (i < n1 - 1)
{
m_T_usub2.coeffRef(i) = -(*s) * m_T_usub.coeff(i + 1);
m_T_usub.coeffRef(i + 1) *= (*c);
}
c++;
s++;
// If we do not need to calculate the R matrix, then
// only the cos and sin sequences are required.
// In such case we only update T[i + 1, (i + 1):(i + 2)]
// T[i + 1, i + 1] = c * T[i + 1, i + 1] + s * T[i, i + 1];
// T[i + 1, i + 2] *= c;
}
this->m_computed = true;
}
///
/// Return the \f$R\f$ matrix in the QR decomposition, which is an
/// upper triangular matrix.
///
/// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
Matrix matrix_R() const
{
if (!this->m_computed)
throw std::logic_error("TridiagQR: need to call compute() first");
Matrix R = Matrix::Zero(this->m_n, this->m_n);
R.diagonal().noalias() = m_T_diag;
R.diagonal(1).noalias() = m_T_usub;
R.diagonal(2).noalias() = m_T_usub2;
return R;
}
///
/// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`,
/// and \f$s\f$ is the shift. The result is a tridiagonal matrix.
///
/// \param dest The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`,
/// depending on the template parameter `Scalar` defined.
///
void matrix_QtHQ(Matrix& dest) const
{
if (!this->m_computed)
throw std::logic_error("TridiagQR: need to call compute() first");
// Make a copy of the R matrix
dest.resize(this->m_n, this->m_n);
dest.setZero();
dest.diagonal().noalias() = m_T_diag;
// The first superdiagonal is read directly from m_T_usub when forming RQ below
// The 2nd superdiagonal will be zero in RQ
// Compute the RQ matrix
// [m11 m12] points to RQ[i:(i+1), i:(i+1)]
// [0 m22]
//
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
const Index n1 = this->m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = this->m_rot_cos.coeff(i);
const Scalar s = this->m_rot_sin.coeff(i);
const Scalar m11 = dest.coeff(i, i),
m12 = m_T_usub.coeff(i),
m22 = m_T_diag.coeff(i + 1);
// Update the diagonal and the lower subdiagonal of dest
dest.coeffRef(i, i) = c * m11 - s * m12;
dest.coeffRef(i + 1, i) = -s * m22;
dest.coeffRef(i + 1, i + 1) = c * m22;
}
// Copy the lower subdiagonal to upper subdiagonal
dest.diagonal(1).noalias() = dest.diagonal(-1);
// Add the shift to the diagonal
dest.diagonal().array() += this->m_shift;
}
};
///
/// @}
///
} // namespace Spectra
#endif // UPPER_HESSENBERG_QR_H
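
A minimal usage sketch for the classes above (illustrative only, not part of the vendored header; the relative include path is an assumption about this Spectra copy). It factorizes a shifted upper Hessenberg matrix and recovers R and Q'HQ, the same shifted-QR step that SymEigsBase drives through TridiagQR during restarting.

#include <Eigen/Core>
#include "LinAlg/UpperHessenbergQR.h"  // assumed relative include path

void upper_hessenberg_qr_demo()
{
    const int n = 6;
    // Build an upper Hessenberg matrix by zeroing everything below the first subdiagonal.
    Eigen::MatrixXd H = Eigen::MatrixXd::Random(n, n);
    for (int j = 0; j < n; j++)
        for (int i = j + 2; i < n; i++)
            H(i, j) = 0.0;

    const double shift = 0.5;
    Spectra::UpperHessenbergQR<double> qr(H, shift);  // H - shift * I = Q * R
    Eigen::MatrixXd R = qr.matrix_R();                // upper triangular factor
    Eigen::MatrixXd HQ;
    qr.matrix_QtHQ(HQ);                               // HQ = Q' * H * Q = R * Q + shift * I
    // HQ is again upper Hessenberg and similar to H.
}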


@ -0,0 +1,108 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_CHOLESKY_H
#define DENSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
template <typename Scalar, int Uplo = Eigen::Lower>
class DenseCholesky
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Matrix> MapConstMat;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
const Index m_n;
Eigen::LLT<Matrix, Uplo> m_decomp;
int m_info; // status of the decomposition
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseCholesky(ConstGenericMatrix& mat) :
m_n(mat.rows()), m_info(NOT_COMPUTED)
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseCholesky: matrix must be square");
m_decomp.compute(mat);
m_info = (m_decomp.info() == Eigen::Success) ?
SUCCESSFUL :
NUMERICAL_ISSUE;
}
///
/// Returns the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Returns the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * x_in
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixL().solve(x);
}
///
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L') * x_in
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixU().solve(x);
}
};
} // namespace Spectra
#endif // DENSE_CHOLESKY_H
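
A small sketch (not part of the vendored file; the include path is assumed) that calls the two triangular solves directly; chained together they give inv(B) * x, which is how SymGEigsSolver uses this class in Cholesky mode.

#include <Eigen/Core>
#include "MatOp/DenseCholesky.h"  // assumed relative include path

void dense_cholesky_demo()
{
    const int n = 8;
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd B = M * M.transpose() + double(n) * Eigen::MatrixXd::Identity(n, n);  // SPD by construction

    Spectra::DenseCholesky<double> Bop(B);            // B = L * L'
    if (Bop.info() != Spectra::SUCCESSFUL)
        return;                                       // factorization failed

    Eigen::VectorXd x = Eigen::VectorXd::Random(n), y(n), z(n);
    Bop.lower_triangular_solve(x.data(), y.data());   // y = inv(L)  * x
    Bop.upper_triangular_solve(y.data(), z.data());   // z = inv(L') * y = inv(B) * x
}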


@ -0,0 +1,102 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
#define DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/LU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the complex shift-solve operation on a general real matrix \f$A\f$,
/// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued
/// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the
/// GenEigsComplexShiftSolver eigen solver.
///
template <typename Scalar>
class DenseGenComplexShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
typedef Eigen::PartialPivLU<ComplexMatrix> ComplexSolver;
ConstGenericMatrix m_mat;
const Index m_n;
ComplexSolver m_solver;
ComplexVector m_x_cache;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseGenComplexShiftSolve(ConstGenericMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the complex shift \f$\sigma\f$.
///
/// \param sigmar Real part of \f$\sigma\f$.
/// \param sigmai Imaginary part of \f$\sigma\f$.
///
void set_shift(Scalar sigmar, Scalar sigmai)
{
m_solver.compute(m_mat.template cast<Complex>() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n));
m_x_cache.resize(m_n);
m_x_cache.setZero();
}
///
/// Perform the complex shift-solve operation
/// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = Re( inv(A - sigma * I) * x_in )
void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_x_cache.real() = MapConstVec(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(m_x_cache).real();
}
};
} // namespace Spectra
#endif // DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
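
Sketch of how this operation object is typically paired with the GenEigsComplexShiftSolver named in the comment above. The solver header, the include paths, and the constructor argument order (op, nev, ncv, sigmar, sigmai) are assumptions, not taken from this diff.

#include <Eigen/Core>
#include "GenEigsComplexShiftSolver.h"        // assumed
#include "MatOp/DenseGenComplexShiftSolve.h"  // assumed

void complex_shift_demo()
{
    const int n = 10;
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);

    Spectra::DenseGenComplexShiftSolve<double> op(A);
    // 3 eigenvalues closest to sigma = 1.0 + 0.5i, Krylov subspace of dimension 8.
    Spectra::GenEigsComplexShiftSolver<double, Spectra::LARGEST_MAGN,
        Spectra::DenseGenComplexShiftSolve<double> > eigs(&op, 3, 8, 1.0, 0.5);
    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXcd evals = eigs.eigenvalues();  // complex eigenvalues of A
    }
}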


@ -0,0 +1,80 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_GEN_MAT_PROD_H
#define DENSE_GEN_MAT_PROD_H
#include <Eigen/Core>
namespace Spectra {
///
/// \defgroup MatOp Matrix Operations
///
/// Define matrix operations on existing matrix objects
///
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and
/// SymEigsSolver eigen solvers.
///
template <typename Scalar>
class DenseGenMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseGenMatProd(ConstGenericMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
};
} // namespace Spectra
#endif // DENSE_GEN_MAT_PROD_H
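
A hedged sketch pairing DenseGenMatProd with the GenEigsSolver mentioned in the class comment; solver header names and include paths are assumptions about this copy.

#include <Eigen/Core>
#include "GenEigsSolver.h"          // assumed
#include "MatOp/DenseGenMatProd.h"  // assumed

void gen_eigs_demo()
{
    const int n = 20;
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);

    Spectra::DenseGenMatProd<double> op(A);  // provides y = A * x
    Spectra::GenEigsSolver<double, Spectra::LARGEST_MAGN,
        Spectra::DenseGenMatProd<double> > eigs(&op, 4, 12);  // nev = 4, ncv = 12
    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXcd evals = eigs.eigenvalues();  // eigenvalues of a general A may be complex
    }
}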


@ -0,0 +1,88 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_GEN_REAL_SHIFT_SOLVE_H
#define DENSE_GEN_REAL_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/LU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a general real matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver.
///
template <typename Scalar>
class DenseGenRealShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const Index m_n;
Eigen::PartialPivLU<Matrix> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseGenRealShiftSolve(ConstGenericMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n));
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // DENSE_GEN_REAL_SHIFT_SOLVE_H
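
Sketch of the shift-and-invert pairing with GenEigsRealShiftSolver mentioned above; the constructor argument order and include paths are assumptions.

#include <Eigen/Core>
#include "GenEigsRealShiftSolver.h"        // assumed
#include "MatOp/DenseGenRealShiftSolve.h"  // assumed

void real_shift_demo()
{
    const int n = 15;
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);

    Spectra::DenseGenRealShiftSolve<double> op(A);
    const double sigma = 2.0;  // look for eigenvalues near 2.0
    Spectra::GenEigsRealShiftSolver<double, Spectra::LARGEST_MAGN,
        Spectra::DenseGenRealShiftSolve<double> > eigs(&op, 3, 10, sigma);
    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXcd evals = eigs.eigenvalues();
    }
}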


@ -0,0 +1,73 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_SYM_MAT_PROD_H
#define DENSE_SYM_MAT_PROD_H
#include <Eigen/Core>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
template <typename Scalar, int Uplo = Eigen::Lower>
class DenseSymMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseSymMatProd(ConstGenericMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
};
} // namespace Spectra
#endif // DENSE_SYM_MAT_PROD_H
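
The class comment above points to SymEigsSolver; a minimal sketch of that pairing, with include paths assumed.

#include <Eigen/Core>
#include "SymEigsSolver.h"          // assumed
#include "MatOp/DenseSymMatProd.h"  // assumed

void sym_eigs_demo()
{
    const int n = 30;
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd A = M + M.transpose();  // make it symmetric

    Spectra::DenseSymMatProd<double> op(A);  // y = A * x
    // Request the 3 largest (algebraic) eigenvalues with a Krylov subspace of dimension 8.
    Spectra::SymEigsSolver<double, Spectra::LARGEST_ALGE,
        Spectra::DenseSymMatProd<double> > eigs(&op, 3, 8);
    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXd evals = eigs.eigenvalues();
        Eigen::MatrixXd evecs = eigs.eigenvectors();
    }
}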


@ -0,0 +1,92 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_SYM_SHIFT_SOLVE_H
#define DENSE_SYM_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <stdexcept>
#include "../LinAlg/BKLDLT.h"
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
///
template <typename Scalar, int Uplo = Eigen::Lower>
class DenseSymShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_n;
BKLDLT<Scalar> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseSymShiftSolve(ConstGenericMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseSymShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
m_solver.compute(m_mat, Uplo, sigma);
if (m_solver.info() != SUCCESSFUL)
throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // DENSE_SYM_SHIFT_SOLVE_H
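
Hedged sketch of the SymEigsShiftSolver pairing mentioned above; the constructor argument order and include paths are assumptions.

#include <Eigen/Core>
#include "SymEigsShiftSolver.h"        // assumed
#include "MatOp/DenseSymShiftSolve.h"  // assumed

void sym_shift_demo()
{
    const int n = 25;
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd A = M + M.transpose();

    Spectra::DenseSymShiftSolve<double> op(A);
    const double sigma = 0.0;  // eigenvalues closest to zero
    Spectra::SymEigsShiftSolver<double, Spectra::LARGEST_MAGN,
        Spectra::DenseSymShiftSolve<double> > eigs(&op, 3, 10, sigma);
    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXd evals = eigs.eigenvalues();  // eigenvalues of A nearest the shift
    }
}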


@ -0,0 +1,109 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_CHOLESKY_H
#define SPARSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseCholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseCholesky
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
const Index m_n;
Eigen::SimplicialLLT<SparseMatrix, Uplo> m_decomp;
int m_info; // status of the decomposition
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseCholesky(ConstGenericSparseMatrix& mat) :
m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseCholesky: matrix must be square");
m_decomp.compute(mat);
m_info = (m_decomp.info() == Eigen::Success) ?
SUCCESSFUL :
NUMERICAL_ISSUE;
}
///
/// Returns the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Returns the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * x_in
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.permutationP() * x;
m_decomp.matrixL().solveInPlace(y);
}
///
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L') * x_in
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixU().solve(x);
y = m_decomp.permutationPinv() * y;
}
};
} // namespace Spectra
#endif // SPARSE_CHOLESKY_H
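
Sketch of SparseCholesky as the B-operation of the SymGEigsSolver mentioned above, in Cholesky mode. The solver header, the GEIGS_CHOLESKY mode tag, and the constructor signature are assumptions based on the class comment, not on this diff.

#include <Eigen/Core>
#include <Eigen/SparseCore>
#include "SymGEigsSolver.h"          // assumed
#include "MatOp/SparseSymMatProd.h"  // assumed
#include "MatOp/SparseCholesky.h"    // assumed

// A: sparse symmetric, B: sparse symmetric positive definite, both n x n with n > 8.
void sparse_geigs_cholesky_demo(const Eigen::SparseMatrix<double>& A,
                                const Eigen::SparseMatrix<double>& B)
{
    Spectra::SparseSymMatProd<double> op(A);   // y = A * x
    Spectra::SparseCholesky<double>   Bop(B);  // B = L * L'

    // Solve A * x = lambda * B * x for the 3 largest eigenvalues.
    Spectra::SymGEigsSolver<double, Spectra::LARGEST_ALGE,
        Spectra::SparseSymMatProd<double>, Spectra::SparseCholesky<double>,
        Spectra::GEIGS_CHOLESKY> geigs(&op, &Bop, 3, 8);
    geigs.init();
    geigs.compute();
    if (geigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXd evals = geigs.eigenvalues();
    }
}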


@ -0,0 +1,74 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_GEN_MAT_PROD_H
#define SPARSE_GEN_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver
/// eigen solvers.
///
template <typename Scalar, int Flags = 0, typename StorageIndex = int>
class SparseGenMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseGenMatProd(ConstGenericSparseMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
};
} // namespace Spectra
#endif // SPARSE_GEN_MAT_PROD_H
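
All of these matrix-operation classes share the same raw-pointer contract: the solvers only call rows()/cols() and perform_op() on preallocated buffers. A small sketch driving SparseGenMatProd directly (include path assumed).

#include <Eigen/Core>
#include <Eigen/SparseCore>
#include "MatOp/SparseGenMatProd.h"  // assumed relative include path

void sparse_matprod_demo(const Eigen::SparseMatrix<double>& A)
{
    Spectra::SparseGenMatProd<double> op(A);

    Eigen::VectorXd x = Eigen::VectorXd::Random(op.cols());
    Eigen::VectorXd y(op.rows());
    op.perform_op(x.data(), y.data());  // y = A * x, written through the raw pointer
    // (y - A * x).norm() should be ~0 up to rounding error.
}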


@ -0,0 +1,93 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_GEN_REAL_SHIFT_SOLVE_H
#define SPARSE_GEN_REAL_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a sparse real matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver.
///
template <typename Scalar, int Flags = 0, typename StorageIndex = int>
class SparseGenRealShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
const int m_n;
Eigen::SparseLU<SparseMatrix> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseGenRealShiftSolve(ConstGenericSparseMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
SparseMatrix I(m_n, m_n);
I.setIdentity();
m_solver.compute(m_mat - sigma * I);
if (m_solver.info() != Eigen::Success)
throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // SPARSE_GEN_REAL_SHIFT_SOLVE_H


@ -0,0 +1,100 @@
// Copyright (C) 2017-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_REGULAR_INVERSE_H
#define SPARSE_REGULAR_INVERSE_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/IterativeLinearSolvers>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines matrix operations required by the generalized eigen solver
/// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$,
/// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation
/// solving operation \f$y=B^{-1}x\f$.
///
/// This class is intended to be used with the SymGEigsSolver generalized eigen solver
/// in the regular inverse mode.
///
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseRegularInverse
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
const int m_n;
Eigen::ConjugateGradient<SparseMatrix> m_cg;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseRegularInverse(ConstGenericSparseMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseRegularInverse: matrix must be square");
m_cg.compute(mat);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Perform the solving operation \f$y=B^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(B) * x_in
void solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_cg.solve(x);
}
///
/// Perform the matrix-vector multiplication operation \f$y=Bx\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = B * x_in
void mat_prod(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
};
} // namespace Spectra
#endif // SPARSE_REGULAR_INVERSE_H
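
Sketch of SparseRegularInverse as the B-operation of SymGEigsSolver in the regular inverse mode it documents; the GEIGS_REGULAR_INVERSE mode tag, solver header, and constructor signature are assumptions.

#include <Eigen/Core>
#include <Eigen/SparseCore>
#include "SymGEigsSolver.h"              // assumed
#include "MatOp/SparseSymMatProd.h"      // assumed
#include "MatOp/SparseRegularInverse.h"  // assumed

// A: sparse symmetric, B: sparse symmetric positive definite, both n x n with n > 8.
void sparse_geigs_reginv_demo(const Eigen::SparseMatrix<double>& A,
                              const Eigen::SparseMatrix<double>& B)
{
    Spectra::SparseSymMatProd<double>     op(A);   // y = A * x
    Spectra::SparseRegularInverse<double> Bop(B);  // y = B * x and y = inv(B) * x

    Spectra::SymGEigsSolver<double, Spectra::LARGEST_ALGE,
        Spectra::SparseSymMatProd<double>, Spectra::SparseRegularInverse<double>,
        Spectra::GEIGS_REGULAR_INVERSE> geigs(&op, &Bop, 3, 8);
    geigs.init();
    geigs.compute();
    if (geigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXd evals = geigs.eigenvalues();
    }
}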


@ -0,0 +1,73 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_SYM_MAT_PROD_H
#define SPARSE_SYM_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseSymMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseSymMatProd(ConstGenericSparseMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
};
} // namespace Spectra
#endif // SPARSE_SYM_MAT_PROD_H


@ -0,0 +1,95 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_SYM_SHIFT_SOLVE_H
#define SPARSE_SYM_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
///
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseSymShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
const int m_n;
Eigen::SparseLU<SparseMatrix> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseSymShiftSolve(ConstGenericSparseMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseSymShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
SparseMatrix mat = m_mat.template selfadjointView<Uplo>();
SparseMatrix identity(m_n, m_n);
identity.setIdentity();
mat = mat - sigma * identity;
m_solver.isSymmetric(true);
m_solver.compute(mat);
if (m_solver.info() != Eigen::Success)
throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // SPARSE_SYM_SHIFT_SOLVE_H


@ -0,0 +1,150 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef ARNOLDI_OP_H
#define ARNOLDI_OP_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
namespace Spectra {
///
/// \ingroup Internals
/// @{
///
///
/// \defgroup Operators Operators
///
/// Different types of operators.
///
///
/// \ingroup Operators
///
/// Operators used in the Arnoldi factorization.
///
template <typename Scalar, typename OpType, typename BOpType>
class ArnoldiOp
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
BOpType& m_Bop;
Vector m_cache;
public:
ArnoldiOp(OpType* op, BOpType* Bop) :
m_op(*op), m_Bop(*Bop), m_cache(op->rows())
{}
inline Index rows() const { return m_op.rows(); }
// In generalized eigenvalue problem Ax=lambda*Bx, define the inner product to be <x, y> = x'By.
// For regular eigenvalue problems, it is the usual inner product <x, y> = x'y
// Compute <x, y> = x'By
// x and y are two vectors
template <typename Arg1, typename Arg2>
Scalar inner_product(const Arg1& x, const Arg2& y)
{
m_Bop.mat_prod(y.data(), m_cache.data());
return x.dot(m_cache);
}
// Compute res = <X, y> = X'By
// X is a matrix, y is a vector, res is a vector
template <typename Arg1, typename Arg2>
void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res)
{
m_Bop.mat_prod(y.data(), m_cache.data());
res.noalias() = x.transpose() * m_cache;
}
// B-norm of a vector, ||x||_B = sqrt(x'Bx)
template <typename Arg>
Scalar norm(const Arg& x)
{
using std::sqrt;
return sqrt(inner_product<Arg, Arg>(x, x));
}
// The "A" operator to generate the Krylov subspace
inline void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_op.perform_op(x_in, y_out);
}
};
///
/// \ingroup Operators
///
/// Placeholder for the B-operator when \f$B = I\f$.
///
class IdentityBOp
{};
///
/// \ingroup Operators
///
/// Partial specialization for the case \f$B = I\f$.
///
template <typename Scalar, typename OpType>
class ArnoldiOp<Scalar, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
public:
ArnoldiOp<Scalar, OpType, IdentityBOp>(OpType* op, IdentityBOp* /*Bop*/) :
m_op(*op)
{}
inline Index rows() const { return m_op.rows(); }
// Compute <x, y> = x'y
// x and y are two vectors
template <typename Arg1, typename Arg2>
Scalar inner_product(const Arg1& x, const Arg2& y) const
{
return x.dot(y);
}
// Compute res = <X, y> = X'y
// X is a matrix, y is a vector, res is a vector
template <typename Arg1, typename Arg2>
void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res) const
{
res.noalias() = x.transpose() * y;
}
// B-norm of a vector. For regular eigenvalue problems it is simply the L2 norm
template <typename Arg>
Scalar norm(const Arg& x)
{
return x.norm();
}
// The "A" operator to generate the Krylov subspace
inline void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_op.perform_op(x_in, y_out);
}
};
///
/// @}
///
} // namespace Spectra
#endif // ARNOLDI_OP_H


@ -0,0 +1,75 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_GEIGS_CHOLESKY_OP_H
#define SYM_GEIGS_CHOLESKY_OP_H
#include <Eigen/Core>
#include "../DenseSymMatProd.h"
#include "../DenseCholesky.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any
/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$.
/// This class is intended for internal use.
///
template <typename Scalar = double,
typename OpType = DenseSymMatProd<double>,
typename BOpType = DenseCholesky<double> >
class SymGEigsCholeskyOp
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
BOpType& m_Bop;
Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object.
/// \param Bop Pointer to the \f$B\f$ matrix operation object.
///
SymGEigsCholeskyOp(OpType& op, BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * A * inv(L') * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_Bop.upper_triangular_solve(x_in, y_out);
m_op.perform_op(y_out, m_cache.data());
m_Bop.lower_triangular_solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SYM_GEIGS_CHOLESKY_OP_H


@ -0,0 +1,72 @@
// Copyright (C) 2017-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_GEIGS_REG_INV_OP_H
#define SYM_GEIGS_REG_INV_OP_H
#include <Eigen/Core>
#include "../SparseSymMatProd.h"
#include "../SparseRegularInverse.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// regular inverse mode. This class is intended for internal use.
///
template <typename Scalar = double,
typename OpType = SparseSymMatProd<double>,
typename BOpType = SparseRegularInverse<double> >
class SymGEigsRegInvOp
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
BOpType& m_Bop;
Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object.
/// \param Bop Pointer to the \f$B\f$ matrix operation object.
///
SymGEigsRegInvOp(OpType& op, BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=B^{-1}Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(B) * A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_op.perform_op(x_in, m_cache.data());
m_Bop.solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SYM_GEIGS_REG_INV_OP_H

448
gtsam/3rdparty/Spectra/SymEigsBase.h vendored Normal file

@ -0,0 +1,448 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_EIGS_BASE_H
#define SYM_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow, std::sqrt
#include <algorithm> // std::min, std::copy
#include <stdexcept> // std::invalid_argument
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/TridiagEigen.h"
#include "LinAlg/Lanczos.h"
namespace Spectra {
///
/// \defgroup EigenSolver Eigen Solvers
///
/// Eigen solvers for different types of problems.
///
///
/// \ingroup EigenSolver
///
/// This is the base class for symmetric eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as SymEigsSolver and SymEigsShiftSolver.
///
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class SymEigsBase
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
typedef Eigen::Array<bool, Eigen::Dynamic, 1> BoolArray;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Vector> MapConstVec;
typedef ArnoldiOp<Scalar, OpType, BOpType> ArnoldiOpType;
typedef Lanczos<Scalar, ArnoldiOpType> LanczosFac;
protected:
// clang-format off
OpType* m_op; // object to conduct matrix operation,
// e.g. matrix-vector product
const Index m_n; // dimension of matrix A
const Index m_nev; // number of eigenvalues requested
const Index m_ncv; // dimension of Krylov subspace in the Lanczos method
Index m_nmatop; // number of matrix operations called
Index m_niter; // number of restarting iterations
LanczosFac m_fac; // Lanczos factorization
Vector m_ritz_val; // Ritz values
private:
Matrix m_ritz_vec; // Ritz vectors
Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
int m_info; // status of the computation
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
const Scalar m_eps23; // m_eps^(2/3), used to test the convergence
// clang-format on
// Implicitly restarted Lanczos factorization
void restart(Index k)
{
if (k >= m_ncv)
return;
TridiagQR<Scalar> decomp(m_ncv);
Matrix Q = Matrix::Identity(m_ncv, m_ncv);
for (Index i = k; i < m_ncv; i++)
{
// QR decomposition of H-mu*I, mu is the shift
decomp.compute(m_fac.matrix_H(), m_ritz_val[i]);
// Q -> Q * Qi
decomp.apply_YQ(Q);
// H -> Q'HQ
// Since QR = H - mu * I, we have H = QR + mu * I
// and therefore Q'HQ = RQ + mu * I
m_fac.compress_H(decomp);
}
m_fac.compress_V(Q);
m_fac.factorize_from(k, m_ncv, m_nmatop);
retrieve_ritzpair();
}
// Calculates the number of converged Ritz values
Index num_converged(Scalar tol)
{
// thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23);
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
// Converged "wanted" Ritz values
m_ritz_conv = (resid < thresh);
return m_ritz_conv.cast<Index>().sum();
}
// Returns the adjusted nev for restarting
Index nev_adjusted(Index nconv)
{
using std::abs;
Index nev_new = m_nev;
for (Index i = m_nev; i < m_ncv; i++)
if (abs(m_ritz_est[i]) < m_near_0)
nev_new++;
// Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK
nev_new += std::min(nconv, (m_ncv - nev_new) / 2);
if (nev_new == 1 && m_ncv >= 6)
nev_new = m_ncv / 2;
else if (nev_new == 1 && m_ncv > 2)
nev_new = 2;
if (nev_new > m_ncv - 1)
nev_new = m_ncv - 1;
return nev_new;
}
// Retrieves and sorts Ritz values and Ritz vectors
void retrieve_ritzpair()
{
TridiagEigen<Scalar> decomp(m_fac.matrix_H());
const Vector& evals = decomp.eigenvalues();
const Matrix& evecs = decomp.eigenvectors();
SortEigenvalue<Scalar, SelectionRule> sorting(evals.data(), evals.size());
std::vector<int> ind = sorting.index();
// For BOTH_ENDS, the eigenvalues are sorted according
// to the LARGEST_ALGE rule, so we need to move those smallest
// values to the left
// The order would be
// Largest => Smallest => 2nd largest => 2nd smallest => ...
// We keep this order since the first k values will always be
// the wanted collection, whether k is nev_adjusted (used in restart())
// or nev (used in sort_ritzpair())
if (SelectionRule == BOTH_ENDS)
{
std::vector<int> ind_copy(ind);
for (Index i = 0; i < m_ncv; i++)
{
// If i is even, pick values from the left (large values)
// If i is odd, pick values from the right (small values)
if (i % 2 == 0)
ind[i] = ind_copy[i / 2];
else
ind[i] = ind_copy[m_ncv - 1 - i / 2];
}
}
// Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
for (Index i = 0; i < m_ncv; i++)
{
m_ritz_val[i] = evals[ind[i]];
m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
}
for (Index i = 0; i < m_nev; i++)
{
m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
}
}
protected:
// Sorts the first nev Ritz pairs in the specified order
// This is used to return the final results
virtual void sort_ritzpair(int sort_rule)
{
// First make sure that we have a valid index vector
SortEigenvalue<Scalar, LARGEST_ALGE> sorting(m_ritz_val.data(), m_nev);
std::vector<int> ind = sorting.index();
switch (sort_rule)
{
case LARGEST_ALGE:
break;
case LARGEST_MAGN:
{
SortEigenvalue<Scalar, LARGEST_MAGN> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_ALGE:
{
SortEigenvalue<Scalar, SMALLEST_ALGE> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_MAGN:
{
SortEigenvalue<Scalar, SMALLEST_MAGN> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
default:
throw std::invalid_argument("unsupported sorting rule");
}
Vector new_ritz_val(m_ncv);
Matrix new_ritz_vec(m_ncv, m_nev);
BoolArray new_ritz_conv(m_nev);
for (Index i = 0; i < m_nev; i++)
{
new_ritz_val[i] = m_ritz_val[ind[i]];
new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
new_ritz_conv[i] = m_ritz_conv[ind[i]];
}
m_ritz_val.swap(new_ritz_val);
m_ritz_vec.swap(new_ritz_vec);
m_ritz_conv.swap(new_ritz_conv);
}
public:
/// \cond
SymEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) :
m_op(op),
m_n(m_op->rows()),
m_nev(nev),
m_ncv(ncv > m_n ? m_n : ncv),
m_nmatop(0),
m_niter(0),
m_fac(ArnoldiOpType(op, Bop), m_ncv),
m_info(NOT_COMPUTED),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3))
{
if (nev < 1 || nev > m_n - 1)
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
if (ncv <= nev || ncv > m_n)
throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix");
}
///
/// Virtual destructor
///
virtual ~SymEigsBase() {}
/// \endcond
///
/// Initializes the solver by providing an initial residual vector.
///
/// \param init_resid Pointer to the initial residual vector.
///
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
/// to find eigenvalues. This function allows the user to provide the initial
/// residual vector.
///
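/// A minimal usage sketch (illustrative only; the solver object `eigs` and the
/// problem size `n` are assumed to be defined as in the derived-class examples):
///
/// \code{.cpp}
/// Eigen::VectorXd v0 = Eigen::VectorXd::Ones(n);  // any nonzero vector of length n
/// eigs.init(v0.data());
/// \endcode
///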
void init(const Scalar* init_resid)
{
// Reset all matrices/vectors to zero
m_ritz_val.resize(m_ncv);
m_ritz_vec.resize(m_ncv, m_nev);
m_ritz_est.resize(m_ncv);
m_ritz_conv.resize(m_nev);
m_ritz_val.setZero();
m_ritz_vec.setZero();
m_ritz_est.setZero();
m_ritz_conv.setZero();
m_nmatop = 0;
m_niter = 0;
// Initialize the Lanczos factorization
MapConstVec v0(init_resid, m_n);
m_fac.init(v0, m_nmatop);
}
///
/// Initializes the solver by providing a random initial residual vector.
///
/// This overloaded function generates a random initial residual vector
/// (with a fixed random seed) for the algorithm. Elements in the vector
/// follow independent Uniform(-0.5, 0.5) distribution.
///
void init()
{
SimpleRandom<Scalar> rng(0);
Vector init_resid = rng.random_vec(m_n);
init(init_resid.data());
}
///
/// Conducts the major computation procedure.
///
/// \param maxit Maximum number of iterations allowed in the algorithm.
/// \param tol Precision parameter for the calculated eigenvalues.
/// \param sort_rule Rule to sort the eigenvalues and eigenvectors.
/// Supported values are
/// `Spectra::LARGEST_ALGE`, `Spectra::LARGEST_MAGN`,
/// `Spectra::SMALLEST_ALGE` and `Spectra::SMALLEST_MAGN`,
/// for example `LARGEST_ALGE` indicates that largest eigenvalues
/// come first. Note that this argument is only used to
/// **sort** the final result, and the **selection** rule
/// (e.g. selecting the largest or smallest eigenvalues in the
/// full spectrum) is specified by the template parameter
/// `SelectionRule` of SymEigsSolver.
///
/// \return Number of converged eigenvalues.
///
Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_ALGE)
{
// The m-step Lanczos factorization
m_fac.factorize_from(1, m_ncv, m_nmatop);
retrieve_ritzpair();
// Restarting
Index i, nconv = 0, nev_adj;
for (i = 0; i < maxit; i++)
{
nconv = num_converged(tol);
if (nconv >= m_nev)
break;
nev_adj = nev_adjusted(nconv);
restart(nev_adj);
}
// Sorting results
sort_ritzpair(sort_rule);
m_niter += i + 1;
m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING;
return std::min(m_nev, nconv);
}
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Returns the number of iterations used in the computation.
///
Index num_iterations() const { return m_niter; }
///
/// Returns the number of matrix operations used in the computation.
///
Index num_operations() const { return m_nmatop; }
///
/// Returns the converged eigenvalues.
///
/// \return A vector containing the eigenvalues.
/// Returned vector type will be `Eigen::Vector<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
Vector eigenvalues() const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
Vector res(nconv);
if (!nconv)
return res;
Index j = 0;
for (Index i = 0; i < m_nev; i++)
{
if (m_ritz_conv[i])
{
res[j] = m_ritz_val[i];
j++;
}
}
return res;
}
///
/// Returns the eigenvectors associated with the converged eigenvalues.
///
/// \param nvec The number of eigenvectors to return.
///
/// \return A matrix containing the eigenvectors.
/// Returned matrix type will be `Eigen::Matrix<Scalar, ...>`,
/// depending on the template parameter `Scalar` defined.
///
virtual Matrix eigenvectors(Index nvec) const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
nvec = std::min(nvec, nconv);
Matrix res(m_n, nvec);
if (!nvec)
return res;
Matrix ritz_vec_conv(m_ncv, nvec);
Index j = 0;
for (Index i = 0; i < m_nev && j < nvec; i++)
{
if (m_ritz_conv[i])
{
ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
j++;
}
}
res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
return res;
}
///
/// Returns all converged eigenvectors.
///
virtual Matrix eigenvectors() const
{
return eigenvectors(m_nev);
}
};
} // namespace Spectra
#endif // SYM_EIGS_BASE_H

View File

@ -0,0 +1,203 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_EIGS_SHIFT_SOLVER_H
#define SYM_EIGS_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices using
/// the **shift-and-invert mode**. The background information of the symmetric
/// eigen solver is documented in the SymEigsSolver class. Here we focus on
/// explaining the shift-and-invert mode.
///
/// The shift-and-invert mode is based on the following fact:
/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of
/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$,
/// we have
/// \f[(A-\sigma I)^{-1}x=\nu x\f]
/// where
/// \f[\nu=\frac{1}{\lambda-\sigma}\f]
/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix
/// \f$(A-\sigma I)^{-1}\f$.
///
/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$
/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired
/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting
/// that \f$\lambda=\sigma+\nu^{-1}\f$.
///
/// The reason why we need this type of manipulation is that
/// the algorithm of **Spectra** (and also **ARPACK**)
/// is good at finding eigenvalues with large magnitude, but may fail in looking
/// for eigenvalues that are close to zero. However, if we really need them, we
/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then
/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$
/// implies smallest values of \f$\lambda\f$.
///
/// To summarize, in the shift-and-invert mode, the selection rule will apply to
/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule
/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of
/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues()
/// method will always return the eigenvalues in the original problem (i.e.,
/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the
/// same for both the original problem and the shifted-and-inverted problem.
///
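/// For instance, with the diagonal matrix used in the first example below and
/// shift \f$\sigma=0\f$, the eigenvalue \f$\lambda=1\f$ of \f$A\f$ corresponds to
/// \f$\nu=1/(1-0)=1\f$, the \f$\nu\f$ of largest magnitude, so the solver locates
/// \f$\lambda=1,2,3\f$ and reports them after the back-transformation
/// \f$\lambda=\sigma+1/\nu\f$.
///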
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the shifted-and-inverted eigenvalues.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymShiftSolve and
/// SparseSymShiftSolve, or define their
/// own that implements all the public member functions as in
/// DenseSymShiftSolve.
///
/// Below is an example that illustrates the use of the shift-and-invert mode:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// // <Spectra/MatOp/DenseSymShiftSolve.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A size-10 diagonal matrix with elements 1, 2, ..., 10
/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10);
/// for(int i = 0; i < M.rows(); i++)
/// M(i, i) = i + 1;
///
/// // Construct matrix operation object using the wrapper class
/// DenseSymShiftSolve<double> op(M);
///
/// // Construct eigen solver object with shift 0
/// // This will find eigenvalues that are closest to 0
/// SymEigsShiftSolver< double, LARGEST_MAGN,
/// DenseSymShiftSolve<double> > eigs(&op, 3, 6, 0.0);
///
/// eigs.init();
/// eigs.compute();
/// if(eigs.info() == SUCCESSFUL)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (3.0, 2.0, 1.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
/// Also an example for user-supplied matrix shift-solve operation class:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTenShiftSolve
/// {
/// private:
/// double sigma_;
/// public:
/// int rows() { return 10; }
/// int cols() { return 10; }
/// void set_shift(double sigma) { sigma_ = sigma; }
/// // y_out = inv(A - sigma * I) * x_in
/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...)
/// void perform_op(double *x_in, double *y_out)
/// {
/// for(int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] / (i + 1 - sigma_);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTenShiftSolve op;
/// // Find three eigenvalues that are closest to 3.14
/// SymEigsShiftSolver<double, LARGEST_MAGN,
/// MyDiagonalTenShiftSolve> eigs(&op, 3, 6, 3.14);
/// eigs.init();
/// eigs.compute();
/// if(eigs.info() == SUCCESSFUL)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (4.0, 3.0, 2.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseSymShiftSolve<double> >
class SymEigsShiftSolver : public SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
const Scalar m_sigma;
// First transform back the Ritz values, and then sort
void sort_ritzpair(int sort_rule)
{
Array m_ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma;
this->m_ritz_val.head(this->m_nev) = m_ritz_val_org;
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
}
public:
///
/// Constructor to create an eigen solver object using the shift-and-invert mode.
///
/// \param op Pointer to the matrix operation object, which should implement
/// the shift-solve operation of \f$A\f$: calculating
/// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseSymShiftSolve, or
/// define their own that implements all the public member functions
/// as in DenseSymShiftSolve.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
/// \param sigma The value of the shift.
///
SymEigsShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) :
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
m_sigma(sigma)
{
this->m_op->set_shift(m_sigma);
}
};
} // namespace Spectra
#endif // SYM_EIGS_SHIFT_SOLVER_H

171
gtsam/3rdparty/Spectra/SymEigsSolver.h vendored Normal file
View File

@ -0,0 +1,171 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_EIGS_SOLVER_H
#define SYM_EIGS_SOLVER_H
#include <Eigen/Core>
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric.
///
/// **Spectra** is designed to calculate a specified number (\f$k\f$)
/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much
/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues
/// and eigenvectors are computed.
///
/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires
/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of
/// this solver need to supply a class that computes the result of \f$Av\f$
/// for any given vector \f$v\f$. The name of this class should be given to
/// the template parameter `OpType`, and instance of this class passed to
/// the constructor of SymEigsSolver.
///
/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**,
/// for example `Eigen::MatrixXd`, then there is an easy way to construct such
/// matrix operation class, by using the built-in wrapper class DenseSymMatProd
/// which wraps an existing matrix object in **Eigen**. This is also the
/// default template parameter for SymEigsSolver. For sparse matrices, the
/// wrapper class SparseSymMatProd can be used similarly.
///
/// If the users need to define their own matrix-vector multiplication operation
/// class, it should implement all the public member functions as in DenseSymMatProd.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their
/// own that implements all the public member functions as in
/// DenseSymMatProd.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsSolver.h>
/// // <Spectra/MatOp/DenseSymMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
/// Eigen::MatrixXd M = A + A.transpose();
///
/// // Construct matrix operation object using the wrapper class DenseSymMatProd
/// DenseSymMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// SymEigsSolver< double, LARGEST_ALGE, DenseSymMatProd<double> > eigs(&op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// if(eigs.info() == SUCCESSFUL)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And here is an example for user-supplied matrix operation class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTen
/// {
/// public:
/// int rows() { return 10; }
/// int cols() { return 10; }
/// // y_out = M * x_in
/// void perform_op(double *x_in, double *y_out)
/// {
/// for(int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] * (i + 1);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTen op;
/// SymEigsSolver<double, LARGEST_ALGE, MyDiagonalTen> eigs(&op, 3, 6);
/// eigs.init();
/// eigs.compute();
/// if(eigs.info() == SUCCESSFUL)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (10, 9, 8)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseSymMatProd<double> >
class SymEigsSolver : public SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the matrix operation object, which should implement
/// the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseSymMatProd, or
/// define their own that implements all the public member functions
/// as in DenseSymMatProd.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
///
SymEigsSolver(OpType* op, Index nev, Index ncv) :
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv)
{}
};
} // namespace Spectra
#endif // SYM_EIGS_SOLVER_H

326
gtsam/3rdparty/Spectra/SymGEigsSolver.h vendored Normal file
View File

@ -0,0 +1,326 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_GEIGS_SOLVER_H
#define SYM_GEIGS_SOLVER_H
#include "SymEigsBase.h"
#include "Util/GEigsMode.h"
#include "MatOp/internal/SymGEigsCholeskyOp.h"
#include "MatOp/internal/SymGEigsRegInvOp.h"
namespace Spectra {
///
/// \defgroup GEigenSolver Generalized Eigen Solvers
///
/// Generalized eigen solvers for different types of problems.
///
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and
/// \f$B\f$ is positive definite.
///
/// There are two modes of this solver, specified by the template parameter
/// GEigsMode. See the pages for the specialized classes for details.
/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky
/// decomposition, which is the preferred mode when the decomposition is
/// available. (This can be easily done in Eigen using the dense or sparse
/// Cholesky solver.)
/// See \ref SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY> "SymGEigsSolver (Cholesky mode)" for more details.
/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the
/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be
/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or
/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition.
/// See \ref SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_REGULAR_INVERSE> "SymGEigsSolver (Regular inverse mode)" for more details.
// Empty class template
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType,
int GEigsMode>
class SymGEigsSolver
{};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky
/// decomposition \f$B=LL'\f$.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower
/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$.
///
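/// The reason these two operations suffice is the standard reduction to an
/// ordinary eigenvalue problem: writing \f$B=LL'\f$ and \f$y=L'x\f$, the problem
/// \f$Ax=\lambda Bx\f$ becomes
/// \f[L^{-1}A(L')^{-1}y=\lambda y,\f]
/// so the solver only ever applies \f$Av\f$, \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$,
/// and the eigenvalues \f$\lambda\f$ are unchanged. The eigenvectors of the
/// original problem are recovered at the end via \f$x=(L')^{-1}y\f$.
///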
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the DenseCholesky or SparseCholesky
/// classes. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their
/// own that implements all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseCholesky and
/// SparseCholesky, or define their
/// own that implements all the public member functions as in
/// DenseCholesky.
/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEIGS_CHOLESKY.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Eigen/Eigenvalues>
/// #include <Spectra/SymGEigsSolver.h>
/// #include <Spectra/MatOp/DenseSymMatProd.h>
/// #include <Spectra/MatOp/SparseCholesky.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x
/// const int n = 100;
///
/// // Define the A matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd A = M + M.transpose();
///
/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals
/// Eigen::SparseMatrix<double> B(n, n);
/// B.reserve(Eigen::VectorXi::Constant(n, 3));
/// for(int i = 0; i < n; i++)
/// {
/// B.insert(i, i) = 2.0;
/// if(i > 0)
/// B.insert(i - 1, i) = 1.0;
/// if(i < n - 1)
/// B.insert(i + 1, i) = 1.0;
/// }
///
/// // Construct matrix operation object using the wrapper classes
/// DenseSymMatProd<double> op(A);
/// SparseCholesky<double> Bop(B);
///
/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues
/// SymGEigsSolver<double, LARGEST_ALGE, DenseSymMatProd<double>, SparseCholesky<double>, GEIGS_CHOLESKY>
/// geigs(&op, &Bop, 3, 6);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if(geigs.info() == SUCCESSFUL)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// // Verify results using the generalized eigen solver in Eigen
/// Eigen::MatrixXd Bdense = B;
/// Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, Bdense);
///
/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl;
/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for GEigsMode = GEIGS_CHOLESKY
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY> :
public SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
BOpType* m_Bop;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object. It
/// should implement the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper classes such as DenseSymMatProd, or
/// define their own that implements all the public member functions
/// as in DenseSymMatProd.
/// \param Bop Pointer to the \f$B\f$ matrix operation object. It
/// represents a Cholesky decomposition of \f$B\f$, and should
/// implement the lower and upper triangular solving operations:
/// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector
/// \f$v\f$, where \f$LL'=B\f$. Users could either
/// create the object from the wrapper classes such as DenseCholesky, or
/// define their own that implements all the public member functions
/// as in DenseCholesky.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
///
SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) :
SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>(
new SymGEigsCholeskyOp<Scalar, OpType, BOpType>(*op, *Bop), NULL, nev, ncv),
m_Bop(Bop)
{}
/// \cond
~SymGEigsSolver()
{
// m_op contains the constructed SymGEigsCholeskyOp object
delete this->m_op;
}
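// The base class returns eigenvectors y of the transformed problem
// L^{-1} A (L')^{-1} y = lambda y; transform them back to the original
// problem via x = (L')^{-1} y (one upper-triangular solve per column).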
Matrix eigenvectors(Index nvec) const
{
Matrix res = SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>::eigenvectors(nvec);
Vector tmp(res.rows());
const Index nconv = res.cols();
for (Index i = 0; i < nconv; i++)
{
m_Bop->upper_triangular_solve(&res(0, i), tmp.data());
res.col(i).noalias() = tmp;
}
return res;
}
Matrix eigenvectors() const
{
return SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY>::eigenvectors(this->m_nev);
}
/// \endcond
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations
/// defined below.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the
/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the SparseRegularInverse class. There is no
/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode
/// is always preferred. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
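/// In this mode the solver effectively works with the operator \f$B^{-1}A\f$,
/// which is self-adjoint with respect to the \f$B\f$-inner product
/// \f$\langle u,v\rangle_B=u'Bv\f$; this is why both \f$Bv\f$ and \f$B^{-1}v\f$
/// are needed -- the former for the inner products and the latter for applying
/// the operator.
///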
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their
/// own that implements all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper class SparseRegularInverse, or define their
/// own that implements all the public member functions as in
/// SparseRegularInverse.
/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEIGS_REGULAR_INVERSE.
///
// Partial specialization for GEigsMode = GEIGS_REGULAR_INVERSE
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_REGULAR_INVERSE> :
public SymEigsBase<Scalar, SelectionRule, SymGEigsRegInvOp<Scalar, OpType, BOpType>, BOpType>
{
private:
typedef Eigen::Index Index;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object. It
/// should implement the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper classes such as DenseSymMatProd, or
/// define their own that implements all the public member functions
/// as in DenseSymMatProd.
/// \param Bop Pointer to the \f$B\f$ matrix operation object. It should
/// implement the multiplication operation \f$Bv\f$ and the linear equation
/// solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class SparseRegularInverse, or
/// define their own that implements all the public member functions
/// as in SparseRegularInverse.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
///
SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) :
SymEigsBase<Scalar, SelectionRule, SymGEigsRegInvOp<Scalar, OpType, BOpType>, BOpType>(
new SymGEigsRegInvOp<Scalar, OpType, BOpType>(*op, *Bop), Bop, nev, ncv)
{}
/// \cond
~SymGEigsSolver()
{
// m_op contains the constructed SymGEigsRegInvOp object
delete this->m_op;
}
/// \endcond
};
} // namespace Spectra
#endif // SYM_GEIGS_SOLVER_H

35
gtsam/3rdparty/Spectra/Util/CompInfo.h vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef COMP_INFO_H
#define COMP_INFO_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to report the status of computation.
///
enum COMPUTATION_INFO
{
SUCCESSFUL = 0, ///< Computation was successful.
NOT_COMPUTED, ///< Used in eigen solvers, indicating that computation
///< has not been conducted. Users should call
///< the `compute()` member function of solvers.
NOT_CONVERGING, ///< Used in eigen solvers, indicating that some eigenvalues
///< did not converge. The `compute()`
///< function returns the number of converged eigenvalues.
NUMERICAL_ISSUE ///< Used in Cholesky decomposition, indicating that the
///< matrix is not positive definite.
};
} // namespace Spectra
#endif // COMP_INFO_H

32
gtsam/3rdparty/Spectra/Util/GEigsMode.h vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEIGS_MODE_H
#define GEIGS_MODE_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to specify the mode of generalized eigenvalue solver.
///
enum GEIGS_MODE
{
GEIGS_CHOLESKY = 0, ///< Using Cholesky decomposition to solve generalized eigenvalues.
GEIGS_REGULAR_INVERSE, ///< Regular inverse mode for generalized eigenvalue solver.
GEIGS_SHIFT_INVERT, ///< Shift-and-invert mode for generalized eigenvalue solver.
GEIGS_BUCKLING, ///< Buckling mode for generalized eigenvalue solver.
GEIGS_CAYLEY ///< Cayley transformation mode for generalized eigenvalue solver.
};
} // namespace Spectra
#endif // GEIGS_MODE_H

View File

@ -0,0 +1,275 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SELECTION_RULE_H
#define SELECTION_RULE_H
#include <vector> // std::vector
#include <cmath> // std::abs
#include <algorithm> // std::sort
#include <complex> // std::complex
#include <utility> // std::pair
#include <stdexcept> // std::invalid_argument
namespace Spectra {
///
/// \defgroup Enumerations
///
/// Enumeration types for the selection rule of eigenvalues.
///
///
/// \ingroup Enumerations
///
/// The enumeration of selection rules of desired eigenvalues.
///
enum SELECT_EIGENVALUE
{
LARGEST_MAGN = 0, ///< Select eigenvalues with largest magnitude. Magnitude
///< means the absolute value for real numbers and norm for
///< complex numbers. Applies to both symmetric and general
///< eigen solvers.
LARGEST_REAL, ///< Select eigenvalues with largest real part. Only for general eigen solvers.
LARGEST_IMAG, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers.
LARGEST_ALGE, ///< Select eigenvalues with largest algebraic value, considering
///< any negative sign. Only for symmetric eigen solvers.
SMALLEST_MAGN, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general
///< eigen solvers.
SMALLEST_REAL, ///< Select eigenvalues with smallest real part. Only for general eigen solvers.
SMALLEST_IMAG, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers.
SMALLEST_ALGE, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers.
BOTH_ENDS ///< Select eigenvalues half from each end of the spectrum. When
///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers.
};
///
/// \ingroup Enumerations
///
/// The enumeration of selection rules of desired eigenvalues. Alias for `SELECT_EIGENVALUE`.
///
enum SELECT_EIGENVALUE_ALIAS
{
WHICH_LM = 0, ///< Alias for `LARGEST_MAGN`
WHICH_LR, ///< Alias for `LARGEST_REAL`
WHICH_LI, ///< Alias for `LARGEST_IMAG`
WHICH_LA, ///< Alias for `LARGEST_ALGE`
WHICH_SM, ///< Alias for `SMALLEST_MAGN`
WHICH_SR, ///< Alias for `SMALLEST_REAL`
WHICH_SI, ///< Alias for `SMALLEST_IMAG`
WHICH_SA, ///< Alias for `SMALLEST_ALGE`
WHICH_BE ///< Alias for `BOTH_ENDS`
};
/// \cond
// Get the element type of a "scalar"
// ElemType<double> => double
// ElemType< std::complex<double> > => double
template <typename T>
class ElemType
{
public:
typedef T type;
};
template <typename T>
class ElemType<std::complex<T> >
{
public:
typedef T type;
};
// When comparing eigenvalues, we first calculate the "target"
// to sort. For example, if we want to choose the eigenvalues with
// largest magnitude, the target will be -abs(x).
// The minus sign is due to the fact that std::sort() sorts in ascending order.
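// A small worked example: for eigenvalues {1, -3, 2} under LARGEST_MAGN the
// targets are {-1, -3, -2}; sorting the targets in ascending order gives the
// index sequence {1, 2, 0}, i.e. -3 first, then 2, then 1.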
// Default target: throw an exception
template <typename Scalar, int SelectionRule>
class SortingTarget
{
public:
static typename ElemType<Scalar>::type get(const Scalar& val)
{
using std::abs;
throw std::invalid_argument("incompatible selection rule");
return -abs(val);
}
};
// Specialization for LARGEST_MAGN
// This covers [float, double, complex] x [LARGEST_MAGN]
template <typename Scalar>
class SortingTarget<Scalar, LARGEST_MAGN>
{
public:
static typename ElemType<Scalar>::type get(const Scalar& val)
{
using std::abs;
return -abs(val);
}
};
// Specialization for LARGEST_REAL
// This covers [complex] x [LARGEST_REAL]
template <typename RealType>
class SortingTarget<std::complex<RealType>, LARGEST_REAL>
{
public:
static RealType get(const std::complex<RealType>& val)
{
return -val.real();
}
};
// Specialization for LARGEST_IMAG
// This covers [complex] x [LARGEST_IMAG]
template <typename RealType>
class SortingTarget<std::complex<RealType>, LARGEST_IMAG>
{
public:
static RealType get(const std::complex<RealType>& val)
{
using std::abs;
return -abs(val.imag());
}
};
// Specialization for LARGEST_ALGE
// This covers [float, double] x [LARGEST_ALGE]
template <typename Scalar>
class SortingTarget<Scalar, LARGEST_ALGE>
{
public:
static Scalar get(const Scalar& val)
{
return -val;
}
};
// Here BOTH_ENDS is the same as LARGEST_ALGE, but
// we need some additional steps, which are done in
// SymEigsSolver.h => retrieve_ritzpair().
// There we move the smallest values to the proper locations.
template <typename Scalar>
class SortingTarget<Scalar, BOTH_ENDS>
{
public:
static Scalar get(const Scalar& val)
{
return -val;
}
};
// Specialization for SMALLEST_MAGN
// This covers [float, double, complex] x [SMALLEST_MAGN]
template <typename Scalar>
class SortingTarget<Scalar, SMALLEST_MAGN>
{
public:
static typename ElemType<Scalar>::type get(const Scalar& val)
{
using std::abs;
return abs(val);
}
};
// Specialization for SMALLEST_REAL
// This covers [complex] x [SMALLEST_REAL]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SMALLEST_REAL>
{
public:
static RealType get(const std::complex<RealType>& val)
{
return val.real();
}
};
// Specialization for SMALLEST_IMAG
// This covers [complex] x [SMALLEST_IMAG]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SMALLEST_IMAG>
{
public:
static RealType get(const std::complex<RealType>& val)
{
using std::abs;
return abs(val.imag());
}
};
// Specialization for SMALLEST_ALGE
// This covers [float, double] x [SMALLEST_ALGE]
template <typename Scalar>
class SortingTarget<Scalar, SMALLEST_ALGE>
{
public:
static Scalar get(const Scalar& val)
{
return val;
}
};
// Sort eigenvalues and return the order index
template <typename PairType>
class PairComparator
{
public:
bool operator()(const PairType& v1, const PairType& v2)
{
return v1.first < v2.first;
}
};
template <typename T, int SelectionRule>
class SortEigenvalue
{
private:
typedef typename ElemType<T>::type TargetType; // Type of the sorting target, will be
// a floating number type, e.g. "double"
typedef std::pair<TargetType, int> PairType; // Type of the sorting pair, including
// the sorting target and the index
std::vector<PairType> pair_sort;
public:
SortEigenvalue(const T* start, int size) :
pair_sort(size)
{
for (int i = 0; i < size; i++)
{
pair_sort[i].first = SortingTarget<T, SelectionRule>::get(start[i]);
pair_sort[i].second = i;
}
PairComparator<PairType> comp;
std::sort(pair_sort.begin(), pair_sort.end(), comp);
}
std::vector<int> index()
{
std::vector<int> ind(pair_sort.size());
for (unsigned int i = 0; i < ind.size(); i++)
ind[i] = pair_sort[i].second;
return ind;
}
};
/// \endcond
} // namespace Spectra
#endif // SELECTION_RULE_H

View File

@ -0,0 +1,91 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SIMPLE_RANDOM_H
#define SIMPLE_RANDOM_H
#include <Eigen/Core>
/// \cond
namespace Spectra {
// We need a simple pseudo random number generator here:
// 1. It is used to generate initial and restarted residual vector.
// 2. It does not need to be particularly "random" or advanced. All we hope
// is that the residual vector is not contained in the current Krylov
// subspace. This should be met almost surely.
// 3. We don't want to call RNG in C++, since we actually want the
// algorithm to be deterministic. Also, calling RNG in C/C++ is not
// allowed in R packages submitted to CRAN.
// 4. The method should be as simple as possible, so an LCG is enough.
// 5. Based on public domain code by Ray Gardner
// http://stjarnhimlen.se/snippets/rg_rand.c
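// (Concretely, this is the classic "minimal standard" Lehmer generator
// x <- 16807 * x mod (2^31 - 1), mapped to roughly (-0.5, 0.5) below.)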
template <typename Scalar = double>
class SimpleRandom
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
const unsigned int m_a; // multiplier
const unsigned long m_max; // 2^31 - 1
long m_rand;
inline long next_long_rand(long seed)
{
unsigned long lo, hi;
lo = m_a * (long) (seed & 0xFFFF);
hi = m_a * (long) ((unsigned long) seed >> 16);
lo += (hi & 0x7FFF) << 16;
if (lo > m_max)
{
lo &= m_max;
++lo;
}
lo += hi >> 15;
if (lo > m_max)
{
lo &= m_max;
++lo;
}
return (long) lo;
}
public:
SimpleRandom(unsigned long init_seed) :
m_a(16807),
m_max(2147483647L),
m_rand(init_seed ? (init_seed & m_max) : 1)
{}
Scalar random()
{
m_rand = next_long_rand(m_rand);
return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
}
// Vector of random numbers of type Scalar
// Ranging from -0.5 to 0.5
Vector random_vec(const Index len)
{
Vector res(len);
for (Index i = 0; i < len; i++)
{
m_rand = next_long_rand(m_rand);
res[i] = Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
}
return res;
}
};
} // namespace Spectra
/// \endcond
#endif // SIMPLE_RANDOM_H

View File

@ -0,0 +1,71 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef TYPE_TRAITS_H
#define TYPE_TRAITS_H
#include <Eigen/Core>
#include <limits>
/// \cond
namespace Spectra {
// For a real value type "Scalar", we want to know its smallest
// positive value, i.e., std::numeric_limits<Scalar>::min().
// However, we must take non-standard value types into account,
// so we rely on Eigen::NumTraits.
//
// Eigen::NumTraits has defined epsilon() and lowest(), but
// lowest() means negative highest(), which is a very small
// negative value.
//
// Therefore, we manually define this limit, and use epsilon()^3
// to mimic it for non-standard types.
// Generic definition
template <typename Scalar>
struct TypeTraits
{
static inline Scalar min()
{
return Eigen::numext::pow(Eigen::NumTraits<Scalar>::epsilon(), Scalar(3));
}
};
// Full specialization
template <>
struct TypeTraits<float>
{
static inline float min()
{
return std::numeric_limits<float>::min();
}
};
template <>
struct TypeTraits<double>
{
static inline double min()
{
return std::numeric_limits<double>::min();
}
};
template <>
struct TypeTraits<long double>
{
static inline long double min()
{
return std::numeric_limits<long double>::min();
}
};
} // namespace Spectra
/// \endcond
#endif // TYPE_TRAITS_H

View File

@ -0,0 +1,552 @@
// Written by Anna Araslanova
// Modified by Yixuan Qiu
// License: MIT
#ifndef LOBPCG_SOLVER
#define LOBPCG_SOLVER
#include <functional>
#include <map>
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/Eigenvalues>
#include <Eigen/SVD>
#include <Eigen/SparseCholesky>
#include "../SymGEigsSolver.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// *** METHOD
/// The class implements the LOBPCG algorithm, which was invented by Andrew Knyazev.
/// Theoretical background of the procedure can be found in the articles below:
/// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver: Locally optimal block preconditioned conjugate gradient method. SIAM Journal on Scientific Computing, 23(2), pp. 517-541.
/// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. Block locally optimal preconditioned eigenvalue solvers (BLOPEX) in HYPRE and PETSc. SIAM Journal on Scientific Computing, 29(5), pp. 2224-2239.
///
/// *** CONDITIONS OF USE
/// Locally Optimal Block Preconditioned Conjugate Gradient (LOBPCG) is a method for finding the M smallest eigenvalues
/// and eigenvectors of a large symmetric positive definite generalized eigenvalue problem
/// \f$Ax=\lambda Bx,\f$
/// where \f$A_{N\times N}\f$ is a symmetric matrix and \f$B\f$ is symmetric and positive definite. \f$A\f$ and \f$B\f$ are also assumed to be large and sparse.
/// \f$M\f$ should satisfy \f$M \ll N\f$ (at least \f$5M < N\f$).
///
/// *** ARGUMENTS
/// Eigen::SparseMatrix<long double> A; // N*N - Ax = lambda*Bx, large and sparse
/// Eigen::SparseMatrix<long double> X; // N*M - initial approximations to eigenvectors (random in general case)
/// Spectra::LOBPCGSolver<long double> solver(A, X);
/// *Eigen::SparseMatrix<long double> B; // N*N - Ax = lambda*Bx, sparse, positive definite
/// solver.setB(B);
/// *Eigen::SparseMatrix<long double> Y; // N*K - constraints, already found eigenvectors
/// solver.setConstraints(Y);
/// *Eigen::SparseMatrix<long double> T; // N*N - preconditioner ~ A^-1
/// solver.setPreconditioner(T);
///
/// *** OUTCOMES
/// solver.compute(); // compute eigenpairs // void
/// solver.info(); // state of convergence // int
/// solver.residuals(); // get residuals to evaluate biases // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
/// solver.eigenvalues(); // get eigenvalues // Eigen::Matrix<Scalar, Eigen::Dynamic, 1>
/// solver.eigenvectors(); // get eigenvectors // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
///
/// *** EXAMPLE
/// \code{.cpp}
/// #include <Spectra/contrib/SymSparseEigsSolverLOBPCG.h>
///
/// // random A
/// Matrix a;
/// a = (Matrix::Random(10, 10).array() > 0.6).cast<long double>() * Matrix::Random(10, 10).array() * 5;
/// a = Matrix((a).triangularView<Eigen::Lower>()) + Matrix((a).triangularView<Eigen::Lower>()).transpose();
/// for (int i = 0; i < 10; i++)
/// a(i, i) = i + 0.5;
/// std::cout << a << "\n";
/// Eigen::SparseMatrix<long double> A(a.sparseView());
/// // random X
/// Eigen::Matrix<long double, 10, 2> x;
/// x = Matrix::Random(10, 2).array();
/// Eigen::SparseMatrix<long double> X(x.sparseView());
/// // solve Ax = lambda*x
/// Spectra::LOBPCGSolver<long double> solver(A, X);
/// solver.compute(10, 1e-4); // 10 iterations, L2_tolerance = 1e-4*N
/// std::cout << "info\n" << solver.info() << std::endl;
/// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl;
/// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl;
/// std::cout << "residuals\n" << solver.residuals() << std::endl;
/// \endcode
///
template <typename Scalar = long double>
class LOBPCGSolver
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
typedef Eigen::SparseMatrix<Scalar> SparseMatrix;
typedef Eigen::SparseMatrix<Complex> SparseComplexMatrix;
const int m_n; // dimension of matrix A
const int m_nev; // number of eigenvalues requested
SparseMatrix A, X;
SparseMatrix m_Y, m_B, m_preconditioner;
bool flag_with_constraints, flag_with_B, flag_with_preconditioner;
public:
SparseMatrix m_residuals;
Matrix m_evectors;
Vector m_evalues;
int m_info;
private:
// B-orthonormalize matrix M
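// Roughly: factor M' * B * M = U' * D * U via LDLT (ignoring the fill-reducing
// permutation used by Eigen::SimplicialLDLT) and replace M by M * U^{-1} * D^{-1/2},
// so that the columns of the new M become B-orthonormal; true_BM is updated to B * M.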
int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B,
SparseMatrix& true_BM, bool has_true_BM = false)
{
SparseMatrix BM;
if (has_true_BM == false)
{
if (flag_with_B)
{
BM = B * M;
}
else
{
BM = M;
}
}
else
{
BM = true_BM;
}
Eigen::SimplicialLDLT<SparseMatrix> chol_MBM(M.transpose() * BM);
if (chol_MBM.info() != SUCCESSFUL)
{
// LDLT decomposition fail
m_info = chol_MBM.info();
return chol_MBM.info();
}
SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast<Complex>();
ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast<Complex>();
D_MBM_vec = D_MBM_vec.cwiseSqrt();
for (int i = 0; i < D_MBM_vec.rows(); i++)
{
D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i);
}
SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal());
SparseComplexMatrix U_inv(Upper_MBM.rows(), Upper_MBM.cols());
U_inv.setIdentity();
Upper_MBM.template triangularView<Eigen::Upper>().solveInPlace(U_inv);
SparseComplexMatrix right_product = U_inv * D_MBM_mat;
M = M * right_product.real();
if (flag_with_B)
{
true_BM = B * M;
}
else
{
true_BM = M;
}
return SUCCESSFUL;
}
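// Project the constraints out of X: X <- X - Y * (Y' * B * Y)^{-1} * (B * Y)' * X,
// so that the updated X is B-orthogonal to the already-computed eigenvectors in Y.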
void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y,
SparseMatrix& B)
{
SparseMatrix BY;
if (flag_with_B)
{
BY = B * Y;
}
else
{
BY = Y;
}
SparseMatrix YBY = Y.transpose() * BY;
SparseMatrix BYX = BY.transpose() * X;
SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView();
X = X - Y * YBY_XYX;
}
// Return the block matrix
//   [ A  B ]
//   [ C  D ]
Matrix stack_4_matricies(Matrix A, Matrix B,
Matrix C, Matrix D)
{
Matrix result(A.rows() + C.rows(), A.cols() + B.cols());
result.topLeftCorner(A.rows(), A.cols()) = A;
result.topRightCorner(B.rows(), B.cols()) = B;
result.bottomLeftCorner(C.rows(), C.cols()) = C;
result.bottomRightCorner(D.rows(), D.cols()) = D;
return result;
}
Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C,
Matrix D, Matrix E, Matrix F,
Matrix G, Matrix H, Matrix I)
{
Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols());
result.block(0, 0, A.rows(), A.cols()) = A;
result.block(0, A.cols(), B.rows(), B.cols()) = B;
result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C;
result.block(A.rows(), 0, D.rows(), D.cols()) = D;
result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E;
result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F;
result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G;
result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H;
result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I;
return result;
}
void sort_epairs(Vector& evalues, Matrix& evectors, int SelectionRule)
{
std::function<bool(Scalar, Scalar)> cmp;
if (SelectionRule == SMALLEST_ALGE)
cmp = std::less<Scalar>{};
else
cmp = std::greater<Scalar>{};
std::map<Scalar, Vector, decltype(cmp)> epairs(cmp);
for (int i = 0; i < m_evectors.cols(); ++i)
epairs.insert(std::make_pair(evalues(i), evectors.col(i)));
int i = 0;
for (auto& epair : epairs)
{
evectors.col(i) = epair.second;
evalues(i) = epair.first;
i++;
}
}
void removeColumns(SparseMatrix& matrix, std::vector<int>& colToRemove)
{
// remove columns through matrix multiplication
SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size()));
int iCol = 0;
std::vector<Eigen::Triplet<Scalar>> tripletList;
tripletList.reserve(matrix.cols() - int(colToRemove.size()));
for (int iRow = 0; iRow < matrix.cols(); iRow++)
{
if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end())
{
tripletList.push_back(Eigen::Triplet<Scalar>(iRow, iCol, 1));
iCol++;
}
}
new_matrix.setFromTriplets(tripletList.begin(), tripletList.end());
matrix = matrix * new_matrix;
}
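// A column of the residual matrix is treated as converged when its 2-norm drops
// below tolerance_L2; such columns are recorded in columnsToDelete, and the
// returned block size is the number of still-active (non-converged) columns.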
int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector<int>& columnsToDelete)
{
// square roots from sum of squares by column
int BlockSize = m_nev;
Scalar sum, buffer;
for (int iCol = 0; iCol < m_nev; iCol++)
{
sum = 0;
for (int iRow = 0; iRow < m_n; iRow++)
{
buffer = m_residuals.coeff(iRow, iCol);
sum += buffer * buffer;
}
if (sqrt(sum) < tolerance_L2)
{
BlockSize--;
columnsToDelete.push_back(iCol);
}
}
return BlockSize;
}
public:
LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) :
m_n(A.rows()),
m_nev(X.cols()),
m_info(NOT_COMPUTED),
flag_with_constraints(false),
flag_with_B(false),
flag_with_preconditioner(false),
A(A),
X(X)
{
if (A.rows() != X.rows() || A.rows() != A.cols())
throw std::invalid_argument("Wrong size");
//if (m_n < 5* m_nev)
// throw std::invalid_argument("The problem size is small compared to the block size. Use standard eigensolver");
}
void setConstraints(const SparseMatrix& Y)
{
m_Y = Y;
flag_with_constraints = true;
}
void setB(const SparseMatrix& B)
{
if (B.rows() != A.rows() || B.cols() != A.cols())
throw std::invalid_argument("Wrong size");
m_B = B;
flag_with_B = true;
}
void setPreconditioner(const SparseMatrix& preconditioner)
{
m_preconditioner = preconditioner;
flag_with_preconditioner = true;
}
void compute(int maxit = 10, Scalar tol_div_n = 1e-7)
{
Scalar tolerance_L2 = tol_div_n * m_n;
int BlockSize;
int max_iter = std::min(m_n, maxit);
SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix;
Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD;
std::vector<int> columnsToDelete;
if (flag_with_constraints)
{
// Apply the constraints Y to X
applyConstraintsInPlace(X, m_Y, m_B);
}
// Make initial vectors orthonormal
// implicit BX declaration
if (orthogonalizeInPlace(X, m_B, BX) != SUCCESSFUL)
{
max_iter = 0;
}
AX = A * X;
// Solve the following NxN eigenvalue problem for all N eigenvalues and -vectors:
// first approximation via a dense problem
Eigen::EigenSolver<Matrix> eigs(Matrix(X.transpose() * AX));
if (eigs.info() != SUCCESSFUL)
{
m_info = eigs.info();
max_iter = 0;
}
else
{
m_evalues = eigs.eigenvalues().real();
m_evectors = eigs.eigenvectors().real();
sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE);
sparse_eVecX = m_evectors.sparseView();
X = X * sparse_eVecX;
AX = AX * sparse_eVecX;
BX = BX * sparse_eVecX;
}
for (int iter_num = 0; iter_num < max_iter; iter_num++)
{
m_residuals.resize(m_n, m_nev);
for (int i = 0; i < m_nev; i++)
{
m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i);
}
BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete);
if (BlockSize == 0)
{
m_info = SUCCESSFUL;
break;
}
// substitution of the original active mask
if (columnsToDelete.size() > 0)
{
removeColumns(m_residuals, columnsToDelete);
if (iter_num > 0)
{
removeColumns(directions, columnsToDelete);
removeColumns(AD, columnsToDelete);
removeColumns(BD, columnsToDelete);
}
columnsToDelete.clear(); // for next iteration
}
if (flag_with_preconditioner)
{
// Apply the preconditioner to the residuals
m_residuals = m_preconditioner * m_residuals;
}
if (flag_with_constraints)
{
// Apply the constraints Y to residuals
applyConstraintsInPlace(m_residuals, m_Y, m_B);
}
if (orthogonalizeInPlace(m_residuals, m_B, BR) != SUCCESSFUL)
{
break;
}
AR = A * m_residuals;
// Orthonormalize conjugate directions
if (iter_num > 0)
{
if (orthogonalizeInPlace(directions, m_B, BD, true) != SUCCESSFUL)
{
break;
}
AD = A * directions;
}
// Perform the Rayleigh Ritz Procedure
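// Build the Gram matrices of A and B on the trial subspace spanned by the
// blocks [X, R] (first iteration) or [X, R, D] (later iterations); the small
// generalized eigenproblem gramA * c = theta * gramB * c solved below gives
// the new Ritz values and the coefficients of the Ritz vectors.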
XAR = Matrix(X.transpose() * AR);
RAR = Matrix(m_residuals.transpose() * AR);
XBR = Matrix(X.transpose() * BR);
if (iter_num > 0)
{
XAD = X.transpose() * AD;
RAD = m_residuals.transpose() * AD;
DAD = directions.transpose() * AD;
XBD = X.transpose() * BD;
RBD = m_residuals.transpose() * BD;
gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose());
gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize));
}
else
{
gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR);
gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize));
}
// Calculate the lowest/largest m eigenpairs; solve the generalized eigenvalue problem.
DenseSymMatProd<Scalar> Aop(gramA);
DenseCholesky<Scalar> Bop(gramB);
SymGEigsSolver<Scalar, SMALLEST_ALGE, DenseSymMatProd<Scalar>,
DenseCholesky<Scalar>, GEIGS_CHOLESKY>
geigs(&Aop, &Bop, m_nev, std::min(10, int(gramA.rows()) - 1));
geigs.init();
int nconv = geigs.compute();
//Mat evecs;
if (geigs.info() == SUCCESSFUL)
{
m_evalues = geigs.eigenvalues();
m_evectors = geigs.eigenvectors();
sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE);
}
else
{
// Problem with the generalized eigenvector computation
m_info = geigs.info();
break;
}
// Compute Ritz vectors
if (iter_num > 0)
{
eVecX = m_evectors.block(0, 0, m_nev, m_nev);
eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev);
eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev);
sparse_eVecX = eVecX.sparseView();
sparse_eVecR = eVecR.sparseView();
sparse_eVecD = eVecD.sparseView();
DD = m_residuals * sparse_eVecR; // new conjugate directions
ADD = AR * sparse_eVecR;
BDD = BR * sparse_eVecR;
DD = DD + directions * sparse_eVecD;
ADD = ADD + AD * sparse_eVecD;
BDD = BDD + BD * sparse_eVecD;
}
else
{
eVecX = m_evectors.block(0, 0, m_nev, m_nev);
eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev);
sparse_eVecX = eVecX.sparseView();
sparse_eVecR = eVecR.sparseView();
DD = m_residuals * sparse_eVecR;
ADD = AR * sparse_eVecR;
BDD = BR * sparse_eVecR;
}
X = X * sparse_eVecX + DD;
AX = AX * sparse_eVecX + ADD;
BX = BX * sparse_eVecX + BDD;
directions = DD;
AD = ADD;
BD = BDD;
} // iteration loop
// calculate last residuals
m_residuals.resize(m_n, m_nev);
for (int i = 0; i < m_nev; i++)
{
m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i);
}
BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete);
if (BlockSize == 0)
{
m_info = SUCCESSFUL;
}
} // compute
Vector eigenvalues()
{
return m_evalues;
}
Matrix eigenvectors()
{
return m_evectors;
}
Matrix residuals()
{
return Matrix(m_residuals);
}
int info() { return m_info; }
};
} // namespace Spectra
#endif // LOBPCG_SOLVER
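The loop above declares convergence once every residual column r_i = A*x_i - lambda_i*B*x_i falls below the tolerance. A small, self-contained Eigen sketch of that residual check for the standard case B = I (all names below are illustrative and not part of the solver):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Build a symmetric test matrix A.
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(6, 6);
  Eigen::MatrixXd A = M + M.transpose();

  // Reference eigenpairs (two smallest), standing in for the LOBPCG iterates.
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eig(A);
  Eigen::VectorXd lambda = eig.eigenvalues().head(2);
  Eigen::MatrixXd X = eig.eigenvectors().leftCols(2);

  // Residual columns r_i = A*x_i - lambda_i*x_i, as in the iteration loop above.
  Eigen::MatrixXd R(6, 2);
  for (int i = 0; i < 2; ++i)
    R.col(i) = A * X.col(i) - lambda(i) * X.col(i);

  const double tol = 1e-9;
  std::cout << "converged: " << (R.colwise().norm().maxCoeff() < tol) << "\n";
  return 0;
}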

View File

@ -0,0 +1,202 @@
// Copyright (C) 2018 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef PARTIAL_SVD_SOLVER_H
#define PARTIAL_SVD_SOLVER_H
#include <Eigen/Core>
#include "../SymEigsSolver.h"
namespace Spectra {
// Abstract class for matrix operation
template <typename Scalar>
class SVDMatOp
{
public:
virtual int rows() const = 0;
virtual int cols() const = 0;
// y_out = A' * A * x_in or y_out = A * A' * x_in
virtual void perform_op(const Scalar* x_in, Scalar* y_out) = 0;
virtual ~SVDMatOp() {}
};
// Operation of a tall matrix in SVD
// We compute the eigenvalues of A' * A
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar, typename MatrixType>
class SVDTallMatOp : public SVDMatOp<Scalar>
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_dim;
Vector m_cache;
public:
// Constructor
SVDTallMatOp(ConstGenericMatrix& mat) :
m_mat(mat),
m_dim(std::min(mat.rows(), mat.cols())),
m_cache(mat.rows())
{}
// These are the rows and columns of A' * A
int rows() const { return m_dim; }
int cols() const { return m_dim; }
// y_out = A' * A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.cols());
m_cache.noalias() = m_mat * x;
y.noalias() = m_mat.transpose() * m_cache;
}
};
// Operation of a wide matrix in SVD
// We compute the eigenvalues of A * A'
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar, typename MatrixType>
class SVDWideMatOp : public SVDMatOp<Scalar>
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_dim;
Vector m_cache;
public:
// Constructor
SVDWideMatOp(ConstGenericMatrix& mat) :
m_mat(mat),
m_dim(std::min(mat.rows(), mat.cols())),
m_cache(mat.cols())
{}
// These are the rows and columns of A * A'
int rows() const { return m_dim; }
int cols() const { return m_dim; }
// y_out = A * A' * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
MapConstVec x(x_in, m_mat.rows());
MapVec y(y_out, m_mat.rows());
m_cache.noalias() = m_mat.transpose() * x;
y.noalias() = m_mat * m_cache;
}
};
// Partial SVD solver
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar = double,
typename MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> >
class PartialSVDSolver
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_m;
const int m_n;
SVDMatOp<Scalar>* m_op;
SymEigsSolver<Scalar, LARGEST_ALGE, SVDMatOp<Scalar> >* m_eigs;
int m_nconv;
Matrix m_evecs;
public:
// Constructor
PartialSVDSolver(ConstGenericMatrix& mat, int ncomp, int ncv) :
m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0)
{
// Determine the matrix type, tall or wide
if (m_m > m_n)
{
m_op = new SVDTallMatOp<Scalar, MatrixType>(mat);
}
else
{
m_op = new SVDWideMatOp<Scalar, MatrixType>(mat);
}
// Solver object
m_eigs = new SymEigsSolver<Scalar, LARGEST_ALGE, SVDMatOp<Scalar> >(m_op, ncomp, ncv);
}
// Destructor
virtual ~PartialSVDSolver()
{
delete m_eigs;
delete m_op;
}
// Computation
int compute(int maxit = 1000, Scalar tol = 1e-10)
{
m_eigs->init();
m_nconv = m_eigs->compute(maxit, tol);
return m_nconv;
}
// The converged singular values
Vector singular_values() const
{
Vector svals = m_eigs->eigenvalues().cwiseSqrt();
return svals;
}
// The converged left singular vectors
Matrix matrix_U(int nu)
{
if (m_evecs.cols() < 1)
{
m_evecs = m_eigs->eigenvectors();
}
nu = std::min(nu, m_nconv);
if (m_m <= m_n)
{
return m_evecs.leftCols(nu);
}
return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix();
}
// The converged right singular vectors
Matrix matrix_V(int nv)
{
if (m_evecs.cols() < 1)
{
m_evecs = m_eigs->eigenvectors();
}
nv = std::min(nv, m_nconv);
if (m_m > m_n)
{
return m_evecs.leftCols(nv);
}
return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix();
}
};
} // namespace Spectra
#endif // PARTIAL_SVD_SOLVER_H
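A minimal usage sketch of the PartialSVDSolver defined above; the include path and local names are illustrative, and the input is just a random dense matrix:

#include <Eigen/Core>
#include <iostream>
#include "PartialSVDSolver.h"  // the header shown above; install path may differ

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 20);

  // Request the 3 largest singular triplets, using ncv = 10 Lanczos vectors.
  Spectra::PartialSVDSolver<double> svds(A, 3, 10);
  svds.compute();

  std::cout << "singular values: " << svds.singular_values().transpose() << "\n";
  Eigen::MatrixXd U = svds.matrix_U(3);  // 100 x 3 left singular vectors
  Eigen::MatrixXd V = svds.matrix_V(3);  // 20 x 3 right singular vectors
  return 0;
}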

View File

@ -133,12 +133,6 @@ endif()
# paths so that the compiler uses GTSAM headers in our source directory instead
# of any previously installed GTSAM headers.
target_include_directories(gtsam BEFORE PUBLIC
# SuiteSparse_config
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SuiteSparse_config>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/SuiteSparse_config>
# CCOLAMD
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>
# main gtsam includes:
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}>
$<INSTALL_INTERFACE:include/>
@ -147,6 +141,19 @@ target_include_directories(gtsam BEFORE PUBLIC
# unit tests:
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/CppUnitLite>
)
# 3rdparty libraries: use the "system" flag so they are included via "-isystem"
# and warnings (and warnings-considered-errors) in those headers are not
# reported as warnings/errors in our targets:
target_include_directories(gtsam SYSTEM BEFORE PUBLIC
# SuiteSparse_config
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SuiteSparse_config>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/SuiteSparse_config>
# Spectra
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/Spectra>
# CCOLAMD
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>
)
if(GTSAM_SUPPORT_NESTED_DISSECTION)
target_include_directories(gtsam BEFORE PUBLIC
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/gtsam/3rdparty/metis/include>
@ -210,5 +217,5 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX)
endif()
# Wrap
wrap_and_install_library(../gtsam.h "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}")
wrap_and_install_library(gtsam.i "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}")
endif ()

View File

@ -30,6 +30,14 @@
#include <iostream>
#include <typeinfo> // operator typeid
#ifdef _WIN32
#define GENERICVALUE_VISIBILITY
#else
// Using GTSAM_EXPORT here would trigger a LNKxxxx error on MSVC, so it is disabled for the MSVC build above.
// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md
#define GENERICVALUE_VISIBILITY GTSAM_EXPORT
#endif
namespace gtsam {
/**

View File

@ -72,9 +72,6 @@
// Make sure dependent projects that want it can see deprecated functions
#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V41
// Publish flag about Eigen typedef
#cmakedefine GTSAM_TYPEDEF_POINTS_TO_VECTORS
// Support Metis-based nested dissection
#cmakedefine GTSAM_SUPPORT_NESTED_DISSECTION

View File

@ -50,37 +50,6 @@ double distance2(const Point2& p, const Point2& q, OptionalJacobian<1, 2> H1,
}
}
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
/* ************************************************************************* */
void Point2::print(const string& s) const {
cout << s << *this << endl;
}
/* ************************************************************************* */
bool Point2::equals(const Point2& q, double tol) const {
return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol);
}
/* ************************************************************************* */
double Point2::norm(OptionalJacobian<1,2> H) const {
return gtsam::norm2(*this, H);
}
/* ************************************************************************* */
double Point2::distance(const Point2& point, OptionalJacobian<1,2> H1,
OptionalJacobian<1,2> H2) const {
return gtsam::distance2(*this, point, H1, H2);
}
/* ************************************************************************* */
ostream &operator<<(ostream &os, const Point2& p) {
os << '(' << p.x() << ", " << p.y() << ')';
return os;
}
#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS
/* ************************************************************************* */
// Math inspired by http://paulbourke.net/geometry/circlesphere/
boost::optional<Point2> circleCircleIntersection(double R_d, double r_d,

View File

@ -22,112 +22,10 @@
namespace gtsam {
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
/// As of GTSAM 4, in order to make GTSAM more lean,
/// it is now possible to just typedef Point2 to Vector2
typedef Vector2 Point2;
#else
/**
* A 2D point
* Complies with the Testable Concept
* Functional, so no set functions: once created, a point is constant.
* @addtogroup geometry
* \nosubgrouping
*/
class Point2 : public Vector2 {
private:
public:
enum { dimension = 2 };
/// @name Standard Constructors
/// @{
/// default constructor
Point2() {}
using Vector2::Vector2;
/// @}
/// @name Advanced Constructors
/// @{
/// construct from 2D vector
explicit Point2(const Vector2& v):Vector2(v) {}
/// @}
/// @name Testable
/// @{
/// print with optional string
GTSAM_EXPORT void print(const std::string& s = "") const;
/// equals with a tolerance, prints out message if unequal
GTSAM_EXPORT bool equals(const Point2& q, double tol = 1e-9) const;
/// @}
/// @name Group
/// @{
/// identity
inline static Point2 identity() {return Point2(0,0);}
/// @}
/// @name Vector Space
/// @{
/** creates a unit vector */
Point2 unit() const { return *this/norm(); }
/** norm of point, with derivative */
GTSAM_EXPORT double norm(OptionalJacobian<1,2> H = boost::none) const;
/** distance between two points */
GTSAM_EXPORT double distance(const Point2& p2, OptionalJacobian<1,2> H1 = boost::none,
OptionalJacobian<1,2> H2 = boost::none) const;
/// @}
/// @name Standard Interface
/// @{
/// equality
inline bool operator ==(const Point2& q) const {return x()==q.x() && y()==q.y();}
/// get x
inline double x() const {return (*this)[0];}
/// get y
inline double y() const {return (*this)[1];}
/// return vectorized form (column-wise).
const Vector2& vector() const { return *this; }
/// @}
/// Streaming
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point2& p);
private:
/// @name Advanced Interface
/// @{
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/)
{
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector2);}
/// @}
};
template<>
struct traits<Point2> : public internal::VectorSpace<Point2> {
};
#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS
/// Distance of the point from the origin, with Jacobian
GTSAM_EXPORT double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none);
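With the typedef-only code path, Point2 is just Eigen's Vector2 and the removed member functions are replaced by the free functions above. A brief sketch of the intended usage (assuming the usual boost::none defaults for the Jacobian arguments):

#include <gtsam/geometry/Point2.h>
#include <iostream>

int main() {
  gtsam::Point2 p(3.0, 4.0), q(0.0, 0.0);

  // GTSAM-specific members are gone; use the free functions or plain Eigen.
  std::cout << gtsam::norm2(p) << "\n";         // 5
  std::cout << gtsam::distance2(p, q) << "\n";  // 5
  std::cout << (p + q).transpose() << "\n";     // ordinary Vector2 arithmetic
  return 0;
}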

View File

@ -22,47 +22,6 @@ using namespace std;
namespace gtsam {
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
bool Point3::equals(const Point3 &q, double tol) const {
return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol &&
std::abs(z() - q.z()) < tol);
}
void Point3::print(const string& s) const {
cout << s << *this << endl;
}
/* ************************************************************************* */
double Point3::distance(const Point3 &q, OptionalJacobian<1, 3> H1,
OptionalJacobian<1, 3> H2) const {
return gtsam::distance3(*this,q,H1,H2);
}
double Point3::norm(OptionalJacobian<1,3> H) const {
return gtsam::norm3(*this, H);
}
Point3 Point3::normalized(OptionalJacobian<3,3> H) const {
return gtsam::normalize(*this, H);
}
Point3 Point3::cross(const Point3 &q, OptionalJacobian<3, 3> H1,
OptionalJacobian<3, 3> H2) const {
return gtsam::cross(*this, q, H1, H2);
}
double Point3::dot(const Point3 &q, OptionalJacobian<1, 3> H1,
OptionalJacobian<1, 3> H2) const {
return gtsam::dot(*this, q, H1, H2);
}
/* ************************************************************************* */
ostream &operator<<(ostream &os, const Point3& p) {
os << '[' << p.x() << ", " << p.y() << ", " << p.z() << "]'";
return os;
}
#endif
/* ************************************************************************* */
double distance3(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1,
OptionalJacobian<1, 3> H2) {

View File

@ -29,107 +29,10 @@
namespace gtsam {
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
/// As of GTSAM 4, in order to make GTSAM more lean,
/// it is now possible to just typedef Point3 to Vector3
typedef Vector3 Point3;
#else
/**
* A 3D point is just a Vector3 with some additional methods
* @addtogroup geometry
* \nosubgrouping
*/
class Point3 : public Vector3 {
public:
enum { dimension = 3 };
/// @name Standard Constructors
/// @{
using Vector3::Vector3;
/// @}
/// @name Testable
/// @{
/** print with optional string */
GTSAM_EXPORT void print(const std::string& s = "") const;
/** equals with a tolerance */
GTSAM_EXPORT bool equals(const Point3& p, double tol = 1e-9) const;
/// @}
/// @name Group
/// @{
/// identity for group operation
inline static Point3 identity() { return Point3(0.0, 0.0, 0.0); }
/// @}
/// @name Vector Space
/// @{
/** distance between two points */
GTSAM_EXPORT double distance(const Point3& p2, OptionalJacobian<1, 3> H1 = boost::none,
OptionalJacobian<1, 3> H2 = boost::none) const;
/** Distance of the point from the origin, with Jacobian */
GTSAM_EXPORT double norm(OptionalJacobian<1,3> H = boost::none) const;
/** normalize, with optional Jacobian */
GTSAM_EXPORT Point3 normalized(OptionalJacobian<3, 3> H = boost::none) const;
/** cross product @return this x q */
GTSAM_EXPORT Point3 cross(const Point3 &q, OptionalJacobian<3, 3> H_p = boost::none, //
OptionalJacobian<3, 3> H_q = boost::none) const;
/** dot product @return this * q*/
GTSAM_EXPORT double dot(const Point3 &q, OptionalJacobian<1, 3> H_p = boost::none, //
OptionalJacobian<1, 3> H_q = boost::none) const;
/// @}
/// @name Standard Interface
/// @{
/// return as Vector3
const Vector3& vector() const { return *this; }
/// get x
inline double x() const {return (*this)[0];}
/// get y
inline double y() const {return (*this)[1];}
/// get z
inline double z() const {return (*this)[2];}
/// @}
/// Output stream operator
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point3& p);
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector3);
}
};
template<>
struct traits<Point3> : public internal::VectorSpace<Point3> {};
template<>
struct traits<const Point3> : public internal::VectorSpace<Point3> {};
#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS
// Convenience typedef
typedef std::pair<Point3, Point3> Point3Pair;
GTSAM_EXPORT std::ostream &operator<<(std::ostream &os, const gtsam::Point3Pair &p);

View File

@ -322,10 +322,10 @@ boost::optional<Pose2> align(const vector<Point2Pair>& pairs) {
// calculate cos and sin
double c=0,s=0;
for(const Point2Pair& pair: pairs) {
Point2 dq = pair.first - cp;
Point2 dp = pair.second - cq;
Point2 dp = pair.first - cp;
Point2 dq = pair.second - cq;
c += dp.x() * dq.x() + dp.y() * dq.y();
s += dp.y() * dq.x() - dp.x() * dq.y(); // this works but is negative from formula above !! :-(
s += -dp.y() * dq.x() + dp.x() * dq.y();
}
// calculate angle and translation
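For reference, the corrected accumulation matches the standard closed-form 2D alignment (a sketch of the reasoning, not text from the source): with dp = pair.first - cp and dq = pair.second - cq as above,

  c = sum_i ( dp.x*dq.x + dp.y*dq.y ),   s = sum_i ( dp.x*dq.y - dp.y*dq.x ),

and the aligning angle is then presumably recovered as theta = atan2(s, c), the maximizer of sum_i dq_i . R(theta) dp_i; the old code had dp and dq swapped and the sign of s flipped.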

View File

@ -430,7 +430,7 @@ boost::optional<Pose3> align(const vector<Point3Pair>& baPointPairs) {
std::ostream &operator<<(std::ostream &os, const Pose3& pose) {
// Both Rot3 and Point3 have ostream definitions so we use them.
os << "R: " << pose.rotation() << "\n";
os << "t: " << pose.translation();
os << "t: " << pose.translation().transpose();
return os;
}

View File

@ -39,6 +39,13 @@ Rot2 Rot2::atan2(double y, double x) {
return R.normalize();
}
/* ************************************************************************* */
Rot2 Rot2::Random(std::mt19937& rng) {
uniform_real_distribution<double> randomAngle(-M_PI, M_PI);
double angle = randomAngle(rng);
return fromAngle(angle);
}
/* ************************************************************************* */
void Rot2::print(const string& s) const {
cout << s << ": " << theta() << endl;

View File

@ -22,6 +22,8 @@
#include <gtsam/base/Lie.h>
#include <boost/optional.hpp>
#include <random>
namespace gtsam {
/**
@ -79,6 +81,14 @@ namespace gtsam {
/** Named constructor that behaves as atan2, i.e., y,x order (!) and normalizes */
static Rot2 atan2(double y, double x);
/**
* Random, generates a random angle \in [-pi,pi]
* Example:
*   std::mt19937 engine(42);
*   Rot2 r = Rot2::Random(engine);
*/
static Rot2 Random(std::mt19937 & rng);
/// @}
/// @name Testable
/// @{

View File

@ -60,8 +60,9 @@ typename SO<N>::TangentVector SO<N>::ChartAtOrigin::Local(const SO& R,
template <int N>
typename SO<N>::MatrixDD SO<N>::AdjointMap() const {
if (N==2) return I_1x1; // SO(2) case
throw std::runtime_error(
"SO<N>::AdjointMap only implemented for SO3 and SO4.");
"SO<N>::AdjointMap only implemented for SO2, SO3 and SO4.");
}
template <int N>
@ -84,26 +85,22 @@ typename SO<N>::MatrixDD SO<N>::LogmapDerivative(const TangentVector& omega) {
throw std::runtime_error("O<N>::LogmapDerivative only implemented for SO3.");
}
// Default fixed size version (but specialized elsewhere for N=2,3,4)
template <int N>
typename SO<N>::VectorN2 SO<N>::vec(
OptionalJacobian<internal::NSquaredSO(N), dimension> H) const {
const size_t n = rows();
const size_t n2 = n * n;
// Vectorize
VectorN2 X(n2);
X << Eigen::Map<const Matrix>(matrix_.data(), n2, 1);
VectorN2 X = Eigen::Map<const VectorN2>(matrix_.data());
// If requested, calculate H as (I \oplus Q) * P,
// where Q is the N*N rotation matrix, and P is calculated below.
if (H) {
// Calculate P matrix of vectorized generators
// TODO(duy): Should we refactor this as the jacobian of Hat?
Matrix P = VectorizedGenerators(n);
const size_t d = dim();
H->resize(n2, d);
for (size_t i = 0; i < n; i++) {
H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d);
Matrix P = SO<N>::VectorizedGenerators();
for (size_t i = 0; i < N; i++) {
H->block(i * N, 0, N, dimension) =
matrix_ * P.block(i * N, 0, N, dimension);
}
}
return X;

View File

@ -22,21 +22,18 @@
namespace gtsam {
template <>
GTSAM_EXPORT
Matrix SOn::Hat(const Vector& xi) {
GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref<Matrix> X) {
size_t n = AmbientDim(xi.size());
if (n < 2) throw std::invalid_argument("SO<N>::Hat: n<2 not supported");
Matrix X(n, n); // allocate space for n*n skew-symmetric matrix
X.setZero();
if (n == 2) {
if (n < 2)
throw std::invalid_argument("SO<N>::Hat: n<2 not supported");
else if (n == 2) {
// Handle SO(2) case as recursion bottom
assert(xi.size() == 1);
X << 0, -xi(0), xi(0), 0;
} else {
// Recursively call SO(n-1) call for top-left block
const size_t dmin = (n - 1) * (n - 2) / 2;
X.topLeftCorner(n - 1, n - 1) = Hat(xi.tail(dmin));
Hat(xi.tail(dmin), X.topLeftCorner(n - 1, n - 1));
// determine sign of last element (signs alternate)
double sign = pow(-1.0, xi.size());
@ -47,7 +44,14 @@ Matrix SOn::Hat(const Vector& xi) {
X(j, n - 1) = -X(n - 1, j);
sign = -sign;
}
X(n - 1, n - 1) = 0; // bottom-right
}
}
template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) {
size_t n = AmbientDim(xi.size());
Matrix X(n, n); // allocate space for n*n skew-symmetric matrix
SOn::Hat(xi, X);
return X;
}
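A small sketch contrasting the two overloads above: the original Matrix-returning Hat and the new in-place version that writes into a caller-provided block (local names are illustrative):

#include <gtsam/geometry/SOn.h>
#include <iostream>

int main() {
  gtsam::Vector xi(3);  // so(3) tangent vector, so n = 3
  xi << 0.1, 0.2, 0.3;

  // Allocating overload, as before.
  gtsam::Matrix X = gtsam::SOn::Hat(xi);

  // New in-place overload fills a pre-sized matrix (or a block of one).
  gtsam::Matrix Y(3, 3);
  gtsam::SOn::Hat(xi, Y);

  std::cout << (X + X.transpose()).norm() << "\n";  // ~0: skew-symmetric
  std::cout << (X - Y).norm() << "\n";              // ~0: same result
  return 0;
}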
@ -99,4 +103,27 @@ SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
return result;
}
// Dynamic version of vec
template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const {
const size_t n = rows(), n2 = n * n;
// Vectorize
VectorN2 X(n2);
X << Eigen::Map<const Matrix>(matrix_.data(), n2, 1);
// If requested, calculate H as (I \oplus Q) * P,
// where Q is the N*N rotation matrix, and P is calculated below.
if (H) {
// Calculate P matrix of vectorized generators
// TODO(duy): Should we refactor this as the jacobian of Hat?
Matrix P = SOn::VectorizedGenerators(n);
const size_t d = dim();
H->resize(n2, d);
for (size_t i = 0; i < n; i++) {
H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d);
}
}
return X;
}
} // namespace gtsam

View File

@ -98,8 +98,8 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
template <typename Derived, int N_ = N, typename = IsDynamic<N_>>
static SO Lift(size_t n, const Eigen::MatrixBase<Derived> &R) {
Matrix Q = Matrix::Identity(n, n);
size_t p = R.rows();
assert(p < n && R.cols() == p);
const int p = R.rows();
assert(p >= 0 && p <= static_cast<int>(n) && R.cols() == p);
Q.topLeftCorner(p, p) = R;
return SO(Q);
}
@ -208,7 +208,7 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
// Calculate run-time dimensionality of manifold.
// Available as dimension or Dim() for fixed N.
size_t dim() const { return Dimension(matrix_.rows()); }
size_t dim() const { return Dimension(static_cast<size_t>(matrix_.rows())); }
/**
* Hat operator creates Lie algebra element corresponding to d-vector, where d
@ -227,9 +227,10 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
*/
static MatrixNN Hat(const TangentVector& xi);
/**
* Inverse of Hat. See note about xi element order in Hat.
*/
/// In-place version of Hat (see details there), implements recursion.
static void Hat(const Vector &xi, Eigen::Ref<MatrixNN> X);
/// Inverse of Hat. See note about xi element order in Hat.
static TangentVector Vee(const MatrixNN& X);
// Chart at origin
@ -295,10 +296,10 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
template <int N_ = N, typename = IsFixed<N_>>
static Matrix VectorizedGenerators() {
constexpr size_t N2 = static_cast<size_t>(N * N);
Matrix G(N2, dimension);
Eigen::Matrix<double, N2, dimension> G;
for (size_t j = 0; j < dimension; j++) {
const auto X = Hat(Vector::Unit(dimension, j));
G.col(j) = Eigen::Map<const Matrix>(X.data(), N2, 1);
G.col(j) = Eigen::Map<const VectorN2>(X.data());
}
return G;
}
@ -362,6 +363,11 @@ template <>
SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
DynamicJacobian H2) const;
/*
* Specialize dynamic vec.
*/
template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const;
/** Serialization function */
template<class Archive>
void serialize(

View File

@ -237,16 +237,6 @@ TEST( Point2, circleCircleIntersection) {
}
/* ************************************************************************* */
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
TEST( Point2, stream) {
Point2 p(1, 2);
std::ostringstream os;
os << p;
EXPECT(os.str() == "(1, 2)");
}
#endif
/* ************************************************************************* */
int main () {
TestResult tr;

View File

@ -153,16 +153,6 @@ TEST( Point3, cross2) {
}
}
/* ************************************************************************* */
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
TEST( Point3, stream) {
Point3 p(1, 2, -3);
std::ostringstream os;
os << p;
EXPECT(os.str() == "[1, 2, -3]'");
}
#endif
//*************************************************************************
TEST (Point3, normalize) {
Matrix actualH;

View File

@ -857,19 +857,11 @@ TEST( Pose3, adjointTranspose) {
}
/* ************************************************************************* */
TEST( Pose3, stream)
{
Pose3 T;
TEST( Pose3, stream) {
std::ostringstream os;
os << T;
string expected;
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0\n0\n0";;
#else
expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: [0, 0, 0]'";
#endif
os << Pose3();
string expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0 0 0";
EXPECT(os.str() == expected);
}
@ -1042,14 +1034,7 @@ TEST(Pose3, print) {
// Add expected rotation
expected << "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\n";
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
expected << "t: 1\n"
"2\n"
"3\n";
#else
expected << "t: [" << translation.x() << ", " << translation.y() << ", " << translation.z() << "]'\n";
#endif
expected << "t: 1 2 3\n";
// reset cout to the original stream
std::cout.rdbuf(oldbuf);

View File

@ -39,8 +39,8 @@ using namespace std;
using namespace gtsam;
//******************************************************************************
// Test dhynamic with n=0
TEST(SOn, SO0) {
// Test dynamic with n=0
TEST(SOn, SOn0) {
const auto R = SOn(0);
EXPECT_LONGS_EQUAL(0, R.rows());
EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension);
@ -50,7 +50,8 @@ TEST(SOn, SO0) {
}
//******************************************************************************
TEST(SOn, SO5) {
// Test dynamic with n=5
TEST(SOn, SOn5) {
const auto R = SOn(5);
EXPECT_LONGS_EQUAL(5, R.rows());
EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension);
@ -59,6 +60,28 @@ TEST(SOn, SO5) {
EXPECT_LONGS_EQUAL(10, traits<SOn>::GetDimension(R));
}
//******************************************************************************
// Test fixed with n=2
TEST(SOn, SO0) {
const auto R = SO<2>();
EXPECT_LONGS_EQUAL(2, R.rows());
EXPECT_LONGS_EQUAL(1, SO<2>::dimension);
EXPECT_LONGS_EQUAL(1, SO<2>::Dim());
EXPECT_LONGS_EQUAL(1, R.dim());
EXPECT_LONGS_EQUAL(1, traits<SO<2>>::GetDimension(R));
}
//******************************************************************************
// Test fixed with n=5
TEST(SOn, SO5) {
const auto R = SO<5>();
EXPECT_LONGS_EQUAL(5, R.rows());
EXPECT_LONGS_EQUAL(10, SO<5>::dimension);
EXPECT_LONGS_EQUAL(10, SO<5>::Dim());
EXPECT_LONGS_EQUAL(10, R.dim());
EXPECT_LONGS_EQUAL(10, traits<SO<5>>::GetDimension(R));
}
//******************************************************************************
TEST(SOn, Concept) {
BOOST_CONCEPT_ASSERT((IsGroup<SOn>));
@ -159,6 +182,22 @@ TEST(SOn, RetractLocal) {
CHECK(assert_equal(v1, SOn::ChartAtOrigin::Local(Q1), 1e-7));
}
//******************************************************************************
Matrix RetractJacobian(size_t n) { return SOn::VectorizedGenerators(n); }
/// Test Jacobian of Retract at origin
TEST(SOn, RetractJacobian) {
Matrix actualH = RetractJacobian(3);
boost::function<Matrix(const Vector &)> h = [](const Vector &v) {
return SOn::ChartAtOrigin::Retract(v).matrix();
};
Vector3 v;
v.setZero();
const Matrix expectedH = numericalDerivative11<Matrix, Vector, 3>(h, v, 1e-5);
CHECK(assert_equal(expectedH, actualH));
}
//******************************************************************************
TEST(SOn, vec) {
Vector10 v;
@ -171,6 +210,23 @@ TEST(SOn, vec) {
CHECK(assert_equal(H, actualH));
}
//******************************************************************************
TEST(SOn, VectorizedGenerators) {
// Default fixed
auto actual2 = SO<2>::VectorizedGenerators();
CHECK(actual2.rows()==4 && actual2.cols()==1)
// Specialized
auto actual3 = SO<3>::VectorizedGenerators();
CHECK(actual3.rows()==9 && actual3.cols()==3)
auto actual4 = SO<4>::VectorizedGenerators();
CHECK(actual4.rows()==16 && actual4.cols()==6)
// Dynamic
auto actual5 = SOn::VectorizedGenerators(5);
CHECK(actual5.rows()==25 && actual5.cols()==10)
}
//******************************************************************************
int main() {
TestResult tr;

View File

@ -618,6 +618,9 @@ class SOn {
// Other methods
Vector vec() const;
Matrix matrix() const;
// enabling serialization functionality
void serialize() const;
};
#include <gtsam/geometry/Rot3.h>
@ -2172,6 +2175,10 @@ class Values {
// void update(size_t j, const gtsam::Value& val);
// gtsam::Value at(size_t j) const;
// The order is important: Vector has to precede Point2/Point3 so `atVector`
// can work for those fixed-size vectors.
void insert(size_t j, Vector vector);
void insert(size_t j, Matrix matrix);
void insert(size_t j, const gtsam::Point2& point2);
void insert(size_t j, const gtsam::Point3& point3);
void insert(size_t j, const gtsam::Rot2& rot2);
@ -2188,8 +2195,6 @@ class Values {
void insert(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera);
void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
void insert(size_t j, const gtsam::NavState& nav_state);
void insert(size_t j, Vector vector);
void insert(size_t j, Matrix matrix);
void update(size_t j, const gtsam::Point2& point2);
void update(size_t j, const gtsam::Point3& point3);
@ -2569,10 +2574,12 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor {
void serialize() const;
};
#include <gtsam/nonlinear/NonlinearEquality.h>
template<T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, gtsam::imuBias::ConstantBias}>
template <T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2,
gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose2,
gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera,
gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2,
gtsam::imuBias::ConstantBias}>
virtual class NonlinearEquality : gtsam::NoiseModelFactor {
// Constructor - forces exact evaluation
NonlinearEquality(size_t j, const T& feasible);
@ -2583,7 +2590,6 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor {
void serialize() const;
};
#include <gtsam/sam/RangeFactor.h>
template<POSE, POINT>
virtual class RangeFactor : gtsam::NoiseModelFactor {
@ -2796,28 +2802,48 @@ class SfmData {
string findExampleDataFile(string name);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise, bool smart);
gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise, bool smart);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise);
gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
gtsam::noiseModel::Diagonal* model, int maxID);
gtsam::noiseModel::Diagonal* model, int maxIndex);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
gtsam::noiseModel::Diagonal* model);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D_robust(string filename,
gtsam::noiseModel::Base* model);
gtsam::noiseModel::Base* model, int maxIndex);
void save2D(const gtsam::NonlinearFactorGraph& graph,
const gtsam::Values& config, gtsam::noiseModel::Diagonal* model,
string filename);
// std::vector<gtsam::BetweenFactor<Pose2>::shared_ptr>
// Ignored by pybind -> will be List[BetweenFactorPose2]
class BetweenFactorPose2s
{
BetweenFactorPose2s();
size_t size() const;
gtsam::BetweenFactor<gtsam::Pose2>* at(size_t i) const;
void push_back(const gtsam::BetweenFactor<gtsam::Pose2>* factor);
};
gtsam::BetweenFactorPose2s parse2DFactors(string filename);
// std::vector<gtsam::BetweenFactor<Pose3>::shared_ptr>
// Ignored by pybind -> will be List[BetweenFactorPose3]
class BetweenFactorPose3s
{
BetweenFactorPose3s();
size_t size() const;
gtsam::BetweenFactorPose3* at(size_t i) const;
void push_back(const gtsam::BetweenFactorPose3* factor);
gtsam::BetweenFactor<gtsam::Pose3>* at(size_t i) const;
void push_back(const gtsam::BetweenFactor<gtsam::Pose3>* factor);
};
gtsam::BetweenFactorPose3s parse3DFactors(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load3D(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> readG2o(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> readG2o(string filename, bool is3D);
void writeG2o(const gtsam::NonlinearFactorGraph& graph,
const gtsam::Values& estimate, string filename);
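A brief round-trip sketch of the dataset functions listed above (file names are placeholders; in C++ the returned pair holds shared pointers, dereferenced the same way):

#include <gtsam/slam/dataset.h>
#include <iostream>

int main() {
  // Load a 3D pose graph: the pair holds the factor graph and initial values.
  auto graphAndValues = gtsam::readG2o("input.g2o", true /* is3D */);

  std::cout << "factors: " << graphAndValues.first->size() << "\n"
            << "values:  " << graphAndValues.second->size() << "\n";

  // Write the (unoptimized) estimate back out.
  gtsam::writeG2o(*graphAndValues.first, *graphAndValues.second, "copy.g2o");
  return 0;
}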
#include <gtsam/slam/InitializePose3.h>
class InitializePose3 {
@ -2839,14 +2865,6 @@ class InitializePose3 {
static gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph);
};
gtsam::BetweenFactorPose3s parse3DFactors(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load3D(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> readG2o(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> readG2o(string filename, bool is3D);
void writeG2o(const gtsam::NonlinearFactorGraph& graph,
const gtsam::Values& estimate, string filename);
#include <gtsam/slam/KarcherMeanFactor-inl.h>
template<T = {gtsam::Point2, gtsam::Rot2, gtsam::Pose2, gtsam::Point3, gtsam::SO3, gtsam::SO4, gtsam::Rot3, gtsam::Pose3}>
virtual class KarcherMeanFactor : gtsam::NonlinearFactor {
@ -2854,7 +2872,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor {
};
#include <gtsam/slam/FrobeniusFactor.h>
gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel(
gtsam::noiseModel::Isotropic* ConvertNoiseModel(
gtsam::noiseModel::Base* model, size_t d);
template<T = {gtsam::SO3, gtsam::SO4}>
@ -2873,14 +2891,148 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {
Vector evaluateError(const T& R1, const T& R2);
};
virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor {
FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
#include <gtsam/sfm/ShonanFactor.h>
virtual class ShonanFactor3 : gtsam::NoiseModelFactor {
ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12,
size_t p);
FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12,
size_t p, gtsam::noiseModel::Base *model);
Vector evaluateError(const gtsam::SOn &Q1, const gtsam::SOn &Q2);
};
#include <gtsam/sfm/BinaryMeasurement.h>
template<T>
class BinaryMeasurement {
BinaryMeasurement(size_t key1, size_t key2, const T& measured,
const gtsam::noiseModel::Base* model);
size_t key1() const;
size_t key2() const;
T measured() const;
};
typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3;
typedef gtsam::BinaryMeasurement<gtsam::Rot3> BinaryMeasurementRot3;
#include <gtsam/sfm/ShonanAveraging.h>
// TODO(frank): copy/pasta below until we have integer template parameters in wrap!
class ShonanAveragingParameters2 {
ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm);
ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, string method);
gtsam::LevenbergMarquardtParams getLMParams() const;
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot2& value);
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
double getKarcherWeight();
void setGaugesWeight(double value);
double getGaugesWeight();
};
class ShonanAveragingParameters3 {
ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm);
ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, string method);
gtsam::LevenbergMarquardtParams getLMParams() const;
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot3& value);
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
double getKarcherWeight();
void setGaugesWeight(double value);
double getGaugesWeight();
};
class ShonanAveraging2 {
ShonanAveraging2(string g2oFile);
ShonanAveraging2(string g2oFile,
const gtsam::ShonanAveragingParameters2 &parameters);
// Query properties
size_t nrUnknowns() const;
size_t nrMeasurements() const;
gtsam::Rot2 measured(size_t i);
gtsam::KeyVector keys(size_t i);
// Matrix API (advanced use, debugging)
Matrix denseD() const;
Matrix denseQ() const;
Matrix denseL() const;
// Matrix computeLambda_(Matrix S) const;
Matrix computeLambda_(const gtsam::Values& values) const;
Matrix computeA_(const gtsam::Values& values) const;
double computeMinEigenValue(const gtsam::Values& values) const;
gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values,
const Vector& minEigenVector, double minEigenValue) const;
// Advanced API
gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const;
gtsam::Values initializeRandomlyAt(size_t p) const;
double costAt(size_t p, const gtsam::Values& values) const;
pair<double, Vector> computeMinEigenVector(const gtsam::Values& values) const;
bool checkOptimality(const gtsam::Values& values) const;
gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial);
// gtsam::Values tryOptimizingAt(size_t p) const;
gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const;
gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const;
gtsam::Values roundSolution(const gtsam::Values& values) const;
// Basic API
double cost(const gtsam::Values& values) const;
gtsam::Values initializeRandomly() const;
pair<gtsam::Values, double> run(const gtsam::Values& initial, size_t min_p, size_t max_p) const;
};
class ShonanAveraging3 {
ShonanAveraging3(string g2oFile);
ShonanAveraging3(string g2oFile,
const gtsam::ShonanAveragingParameters3 &parameters);
// TODO(frank): deprecate once we land pybind wrapper
ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors);
ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors,
const gtsam::ShonanAveragingParameters3 &parameters);
// Query properties
size_t nrUnknowns() const;
size_t nrMeasurements() const;
gtsam::Rot3 measured(size_t i);
gtsam::KeyVector keys(size_t i);
// Matrix API (advanced use, debugging)
Matrix denseD() const;
Matrix denseQ() const;
Matrix denseL() const;
// Matrix computeLambda_(Matrix S) const;
Matrix computeLambda_(const gtsam::Values& values) const;
Matrix computeA_(const gtsam::Values& values) const;
double computeMinEigenValue(const gtsam::Values& values) const;
gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values,
const Vector& minEigenVector, double minEigenValue) const;
// Advanced API
gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const;
gtsam::Values initializeRandomlyAt(size_t p) const;
double costAt(size_t p, const gtsam::Values& values) const;
pair<double, Vector> computeMinEigenVector(const gtsam::Values& values) const;
bool checkOptimality(const gtsam::Values& values) const;
gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial);
// gtsam::Values tryOptimizingAt(size_t p) const;
gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const;
gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const;
gtsam::Values roundSolution(const gtsam::Values& values) const;
// Basic API
double cost(const gtsam::Values& values) const;
gtsam::Values initializeRandomly() const;
pair<gtsam::Values, double> run(const gtsam::Values& initial, size_t min_p, size_t max_p) const;
};
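A minimal sketch of the new ShonanAveraging3 interface listed above (the g2o file name is a placeholder):

#include <gtsam/sfm/ShonanAveraging.h>
#include <iostream>

int main() {
  // Rotation averaging over the relative rotations stored in a g2o file.
  gtsam::ShonanAveraging3 shonan("pose3_graph.g2o");

  gtsam::Values initial = shonan.initializeRandomly();
  // Run the Riemannian staircase from p = 3 up to at most p = 10.
  auto result = shonan.run(initial, 3, 10);

  // result.first: the rounded Rot3 estimates; result.second: the scalar
  // reported by run() (used for the optimality check).
  std::cout << "estimated rotations: " << result.first.size() << "\n";
  std::cout << "certificate value:   " << result.second << "\n";
  return 0;
}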
//*************************************************************************
// Navigation
//*************************************************************************
@ -3236,6 +3388,7 @@ namespace utilities {
gtsam::KeySet createKeySet(string s, Vector I);
Matrix extractPoint2(const gtsam::Values& values);
Matrix extractPoint3(const gtsam::Values& values);
gtsam::Values allPose2s(gtsam::Values& values);
Matrix extractPose2(const gtsam::Values& values);
gtsam::Values allPose3s(gtsam::Values& values);
Matrix extractPose3(const gtsam::Values& values);

View File

@ -91,6 +91,7 @@ namespace gtsam {
template<class DERIVED, class FACTORGRAPH>
size_t BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::numCachedSeparatorMarginals() const
{
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
if (!cachedSeparatorMarginal_)
return 0;
@ -144,6 +145,7 @@ namespace gtsam {
typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(
Eliminate function) const {
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
gttic(BayesTreeCliqueBase_separatorMarginal);
// Check if the Separator marginal was already calculated
if (!cachedSeparatorMarginal_) {
@ -206,6 +208,8 @@ namespace gtsam {
// When a shortcut is requested, all of the shortcuts between it and the
// root are also generated. So, if this clique's cached shortcut is set,
// recursively call over all child cliques. Otherwise, it is unnecessary.
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
if (cachedSeparatorMarginal_) {
for(derived_ptr& child: children) {
child->deleteCachedShortcuts();

View File

@ -24,6 +24,7 @@
#include <boost/optional.hpp>
#include <string>
#include <mutex>
namespace gtsam {
@ -75,10 +76,28 @@ namespace gtsam {
/** Construct from a conditional, leaving parent and child pointers uninitialized */
BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {}
/** Shallow copy constructor */
BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {}
/** Shallow copy assignment constructor */
BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) {
conditional_ = c.conditional_;
parent_ = c.parent_;
children = c.children;
problemSize_ = c.problemSize_;
is_root = c.is_root;
return *this;
}
/// @}
/// This stores the Cached separator margnal P(S)
/// This stores the Cached separator marginal P(S)
mutable boost::optional<FactorGraphType> cachedSeparatorMarginal_;
/// This protects the cached separator marginal P(S) from concurrent reads/writes.
/// Many of the functions that access it are const (hence the mutable), which gives
/// the false impression that they are thread-safe; the mutable cache means they are
/// not, so they take this mutex before touching it.
mutable std::mutex cachedSeparatorMarginalMutex_;
public:
sharedConditional conditional_;
@ -144,7 +163,9 @@ namespace gtsam {
void deleteCachedShortcuts();
const boost::optional<FactorGraphType>& cachedSeparatorMarginal() const {
return cachedSeparatorMarginal_; }
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
return cachedSeparatorMarginal_;
}
friend class BayesTree<DerivedType>;
@ -159,7 +180,10 @@ namespace gtsam {
KeyVector shortcut_indices(const derived_ptr& B, const FactorGraphType& p_Cp_B) const;
/** Non-recursive delete cached shortcuts and marginals - internal only. */
void deleteCachedShortcutsNonRecursive() { cachedSeparatorMarginal_ = boost::none; }
void deleteCachedShortcutsNonRecursive() {
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
cachedSeparatorMarginal_ = boost::none;
}
private:
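The pattern introduced here, a mutable mutex guarding a mutable cache inside const accessors, in a self-contained sketch (illustrative class, not GTSAM code):

#include <boost/optional.hpp>
#include <mutex>

class CachedThing {
  mutable boost::optional<int> cache_;  // lazily computed result
  mutable std::mutex cacheMutex_;       // mutable so const methods may lock it

 public:
  int value() const {
    std::lock_guard<std::mutex> lock(cacheMutex_);
    if (!cache_) cache_ = 42;  // an expensive computation stands in here
    return *cache_;
  }

  void invalidate() {
    std::lock_guard<std::mutex> lock(cacheMutex_);
    cache_ = boost::none;
  }
};

int main() { return CachedThing().value() == 42 ? 0 : 1; }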

View File

@ -167,10 +167,11 @@ gtsam::NonlinearFactor::shared_ptr CombinedImuFactor::clone() const {
//------------------------------------------------------------------------------
void CombinedImuFactor::print(const string& s,
const KeyFormatter& keyFormatter) const {
cout << s << "CombinedImuFactor(" << keyFormatter(this->key1()) << ","
<< keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << ","
<< keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) << ","
<< keyFormatter(this->key6()) << ")\n";
cout << (s == "" ? s : s + "\n") << "CombinedImuFactor("
<< keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ","
<< keyFormatter(this->key3()) << "," << keyFormatter(this->key4()) << ","
<< keyFormatter(this->key5()) << "," << keyFormatter(this->key6())
<< ")\n";
_PIM_.print(" preintegrated measurements:");
this->noiseModel_->print(" noise model: ");
}

View File

@ -303,7 +303,7 @@ public:
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
const CombinedImuFactor&);
/// print
void print(const std::string& s, const KeyFormatter& keyFormatter =
void print(const std::string& s = "", const KeyFormatter& keyFormatter =
DefaultKeyFormatter) const override;
/// equals

View File

@ -130,9 +130,9 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor& f) {
//------------------------------------------------------------------------------
void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const {
cout << s << "ImuFactor(" << keyFormatter(this->key1()) << ","
<< keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << ","
<< keyFormatter(this->key4()) << "," << keyFormatter(this->key5())
cout << (s == "" ? s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1())
<< "," << keyFormatter(this->key2()) << "," << keyFormatter(this->key3())
<< "," << keyFormatter(this->key4()) << "," << keyFormatter(this->key5())
<< ")\n";
cout << *this << endl;
}
@ -226,9 +226,9 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor2& f) {
//------------------------------------------------------------------------------
void ImuFactor2::print(const string& s,
const KeyFormatter& keyFormatter) const {
cout << s << "ImuFactor2(" << keyFormatter(this->key1()) << ","
<< keyFormatter(this->key2()) << "," << keyFormatter(this->key3())
<< ")\n";
cout << (s == "" ? s : s + "\n") << "ImuFactor2("
<< keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ","
<< keyFormatter(this->key3()) << ")\n";
cout << *this << endl;
}
