Merge branch 'develop' into temp/copy_python

release/4.3a0
Fan Jiang 2020-08-17 14:32:20 -04:00
commit 37f42b82fa
184 changed files with 12426 additions and 2283 deletions

.github/scripts/python.sh vendored Normal file

@ -0,0 +1,94 @@
#!/bin/bash
##########################################################
# Build and test the GTSAM Python wrapper.
##########################################################
set -x -e
if [ -z ${PYTHON_VERSION+x} ]; then
echo "Please provide the Python version to build against!"
exit 127
fi
if [ -z ${WRAPPER+x} ]; then
echo "Please provide the wrapper to build!"
exit 126
fi
PYTHON="python${PYTHON_VERSION}"
if [[ $(uname) == "Darwin" ]]; then
brew install wget
else
# Install a system package required by our library
sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
fi
PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
case $WRAPPER in
"cython")
BUILD_CYTHON="ON"
BUILD_PYBIND="OFF"
TYPEDEF_POINTS_TO_VECTORS="OFF"
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/cython/requirements.txt
;;
"pybind")
BUILD_CYTHON="OFF"
BUILD_PYBIND="ON"
TYPEDEF_POINTS_TO_VECTORS="ON"
sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
;;
*)
exit 126
;;
esac
git submodule update --init --recursive
mkdir $GITHUB_WORKSPACE/build
cd $GITHUB_WORKSPACE/build
cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \
-DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
-DGTSAM_USE_QUATERNIONS=OFF \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=${BUILD_CYTHON} \
-DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
-DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
make -j$(nproc) install &
while ps -p $! > /dev/null
do
sleep 60
now=$(date +%s)
printf "%d seconds have elapsed\n" $(( (now - start) ))
done
case $WRAPPER in
"cython")
cd $GITHUB_WORKSPACE/build/cython
$PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/build/cython/gtsam/tests
$PYTHON -m unittest discover
;;
"pybind")
cd $GITHUB_WORKSPACE/python
$PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/wrap/python/gtsam_py/tests
$PYTHON -m unittest discover
;;
*)
echo "THIS SHOULD NEVER HAPPEN!"
exit 125
;;
esac
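For reference, once this script has installed either toolbox, the wrapped library is importable as gtsam and is exercised by the unittest discovery above. A minimal interactive sanity check along the same lines (a sketch only, not part of the CI script; it assumes the install landed on the interpreter's path):

# Quick post-install check that core wrapped types are usable.
import gtsam

graph = gtsam.NonlinearFactorGraph()          # empty factor graph
values = gtsam.Values()
values.insert(0, gtsam.Pose2(1.0, 2.0, 0.3))  # key 0 -> a 2D pose
print(graph.error(values))                    # no factors yet, so this prints 0.0
print(values.atPose2(0))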

.travis.sh → .github/scripts/unix.sh vendored Executable file → Normal file

@ -1,20 +1,25 @@
#!/bin/bash
##########################################################
# Build and test GTSAM for *nix based systems.
# Specifically Linux and macOS.
##########################################################
# install TBB with _debug.so files
function install_tbb()
{
TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
TBB_VERSION=4.4.2
TBB_DIR=tbb44_20151115oss
TBB_VERSION=4.4.5
TBB_DIR=tbb44_20160526oss
TBB_SAVEPATH="/tmp/tbb.tgz"
if [ "$TRAVIS_OS_NAME" == "linux" ]; then
if [ "$(uname)" == "Linux" ]; then
OS_SHORT="lin"
TBB_LIB_DIR="intel64/gcc4.4"
SUDO="sudo"
elif [ "$TRAVIS_OS_NAME" == "osx" ]; then
OS_SHORT="lin"
elif [ "$(uname)" == "Darwin" ]; then
OS_SHORT="osx"
TBB_LIB_DIR=""
SUDO=""
@ -25,7 +30,7 @@ function install_tbb()
TBBROOT=/tmp/$TBB_DIR
# Copy the needed files to the correct places.
# This works correctly for travis builds, instead of setting path variables.
# This works correctly for CI builds, instead of setting path variables.
# This is what Homebrew does to install TBB on Macs
$SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
$SUDO cp -R $TBBROOT/include/ /usr/local/include/
@ -38,15 +43,15 @@ function configure()
set -e # Make sure any error makes the script to return an error code
set -x # echo
SOURCE_DIR=`pwd`
BUILD_DIR=build
SOURCE_DIR=$GITHUB_WORKSPACE
BUILD_DIR=$GITHUB_WORKSPACE/build
#env
git clean -fd || true
git submodule update --init --recursive
rm -fr $BUILD_DIR || true
mkdir $BUILD_DIR && cd $BUILD_DIR
install_tbb
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
if [ ! -z "$GCC_VERSION" ]; then
export CC=gcc-$GCC_VERSION
@ -61,9 +66,12 @@ function configure()
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DCMAKE_VERBOSE_MAKEFILE=OFF
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DBOOST_ROOT=$BOOST_ROOT \
-DBoost_NO_SYSTEM_PATHS=ON \
-DBoost_ARCHITECTURE=-x64
}
@ -71,7 +79,7 @@ function configure()
function finish ()
{
# Print ccache stats
ccache -s
[ -x "$(command -v ccache)" ] && ccache -s
cd $SOURCE_DIR
}
@ -111,4 +119,4 @@ case $1 in
-t)
test
;;
esac
esac

.github/workflows/build-linux.yml vendored Normal file

@ -0,0 +1,78 @@
name: Linux CI
on: [push, pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
compiler: gcc
version: "5"
- name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
compiler: gcc
version: "9"
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
compiler: clang
version: "9"
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (Linux)
if: runner.os == 'Linux'
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt install cmake build-essential pkg-config libpython-dev python-numpy
echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)"
echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)"
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
fi
- name: Check Boost version
if: runner.os == 'Linux'
run: |
echo "BOOST_ROOT = $BOOST_ROOT"
- name: Build (Linux)
if: runner.os == 'Linux'
run: |
bash .github/scripts/unix.sh -t

.github/workflows/build-macos.yml vendored Normal file

@ -0,0 +1,51 @@
name: macOS CI
on: [pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
macOS-10.15-xcode-11.3.1,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: macOS-10.15-xcode-11.3.1
os: macOS-10.15
compiler: xcode
version: "11.3.1"
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (macOS)
if: runner.os == 'macOS'
run: |
brew install cmake ninja boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "::set-env name=CXX::clang++"
fi
- name: Build (macOS)
if: runner.os == 'macOS'
run: |
bash .github/scripts/unix.sh -t

.github/workflows/build-python.yml vendored Normal file

@ -0,0 +1,95 @@
name: Python CI
on: [pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
PYTHON_VERSION: ${{ matrix.python_version }}
WRAPPER: ${{ matrix.wrapper }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
ubuntu-18.04-gcc-5,
ubuntu-18.04-gcc-9,
ubuntu-18.04-clang-9,
macOS-10.15-xcode-11.3.1,
]
build_type: [Debug, Release]
python_version: [3]
wrapper: [cython]
include:
- name: ubuntu-18.04-gcc-5
os: ubuntu-18.04
compiler: gcc
version: "5"
- name: ubuntu-18.04-gcc-9
os: ubuntu-18.04
compiler: gcc
version: "9"
- name: ubuntu-18.04-clang-9
os: ubuntu-18.04
compiler: clang
version: "9"
- name: macOS-10.15-xcode-11.3.1
os: macOS-10.15
compiler: xcode
version: "11.3.1"
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (Linux)
if: runner.os == 'Linux'
run: |
# LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
fi
sudo apt-get -y update
sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
if [ "${{ matrix.compiler }}" = "gcc" ]; then
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
echo "::set-env name=CC::clang-${{ matrix.version }}"
echo "::set-env name=CXX::clang++-${{ matrix.version }}"
fi
- name: Install (macOS)
if: runner.os == 'macOS'
run: |
brew install cmake ninja boost
if [ "${{ matrix.compiler }}" = "gcc" ]; then
brew install gcc@${{ matrix.version }}
echo "::set-env name=CC::gcc-${{ matrix.version }}"
echo "::set-env name=CXX::g++-${{ matrix.version }}"
else
sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
echo "::set-env name=CC::clang"
echo "::set-env name=CXX::clang++"
fi
- name: Build (Linux)
if: runner.os == 'Linux'
run: |
bash .github/scripts/python.sh
- name: Build (macOS)
if: runner.os == 'macOS'
run: |
bash .github/scripts/python.sh

.github/workflows/build-windows.yml vendored Normal file

@ -0,0 +1,75 @@
name: Windows CI
on: [pull_request]
jobs:
build:
name: ${{ matrix.name }} ${{ matrix.build_type }}
runs-on: ${{ matrix.os }}
env:
CTEST_OUTPUT_ON_FAILURE: ON
CTEST_PARALLEL_LEVEL: 2
CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
strategy:
fail-fast: false
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
windows-2016-cl,
windows-2019-cl,
]
build_type: [Debug, Release]
build_unstable: [ON]
include:
- name: windows-2016-cl
os: windows-2016
compiler: cl
- name: windows-2019-cl
os: windows-2019
compiler: cl
steps:
- name: Checkout
uses: actions/checkout@master
- name: Install (Windows)
if: runner.os == 'Windows'
shell: powershell
run: |
Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
scoop install ninja --global
if ("${{ matrix.compiler }}".StartsWith("clang")) {
scoop install llvm --global
}
if ("${{ matrix.compiler }}" -eq "gcc") {
# Chocolatey GCC is broken on the windows-2019 image.
# See: https://github.com/DaanDeMeyer/doctest/runs/231595515
# See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413
scoop install gcc --global
echo "::set-env name=CC::gcc"
echo "::set-env name=CXX::g++"
} elseif ("${{ matrix.compiler }}" -eq "clang") {
echo "::set-env name=CC::clang"
echo "::set-env name=CXX::clang++"
} else {
echo "::set-env name=CC::${{ matrix.compiler }}"
echo "::set-env name=CXX::${{ matrix.compiler }}"
}
# Scoop modifies the PATH so we make the modified PATH global.
echo "::set-env name=PATH::$env:PATH"
- name: Build (Windows)
if: runner.os == 'Windows'
run: |
cmake -E remove_directory build
echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}"
cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib"
cmake --build build --config ${{ matrix.build_type }} --target gtsam
cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build --config ${{ matrix.build_type }} --target wrap
cmake --build build --config ${{ matrix.build_type }} --target check.base
cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable
cmake --build build --config ${{ matrix.build_type }} --target check.linear


@ -1,6 +1,9 @@
# This triggers Cython builds on `gtsam-manylinux-build`
name: Trigger Python Builds
on: push
on:
push:
branches:
- develop
jobs:
triggerCython:
runs-on: ubuntu-latest

.gitignore vendored

@ -21,3 +21,4 @@ cython/gtsam_wrapper.pxd
/CMakeSettings.json
# for QtCreator:
CMakeLists.txt.user*
xcode/


@ -1,43 +0,0 @@
#!/bin/bash
set -x -e
if [ -z ${PYTHON_VERSION+x} ]; then
echo "Please provide the Python version to build against!"
exit 127
fi
PYTHON="python${PYTHON_VERSION}"
if [[ $(uname) == "Darwin" ]]; then
brew install wget
else
# Install a system package required by our library
sudo apt-get install wget libicu-dev python3-pip python3-setuptools
fi
CURRDIR=$(pwd)
sudo $PYTHON -m pip install -r ./cython/requirements.txt
mkdir $CURRDIR/build
cd $CURRDIR/build
cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \
-DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
-DGTSAM_USE_QUATERNIONS=OFF \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF \
-DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install
make -j$(nproc) install
cd cython
sudo $PYTHON setup.py install
cd $CURRDIR/cython/gtsam/tests
$PYTHON -m unittest discover


@ -1,141 +0,0 @@
language: cpp
cache: ccache
sudo: required
dist: xenial
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main'
key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
packages:
- g++-9
- clang-9
- build-essential pkg-config
- cmake
- python3-dev libpython-dev
- python3-numpy
- libboost-all-dev
# before_install:
# - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi
# We first do the compile stage specified below, then the matrix expansion specified after.
stages:
- compile
- test
- special
env:
global:
- MAKEFLAGS="-j2"
- CCACHE_SLOPPINESS=pch_defines,time_macros
# Compile stage without building examples/tests to populate the caches.
jobs:
# -------- STAGE 1: COMPILE -----------
include:
# on Mac, GCC
- stage: compile
os: osx
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: osx
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Mac, CLANG
- stage: compile
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, GCC
- stage: compile
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, CLANG
- stage: compile
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, with deprecated ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON
script: bash .travis.sh -b
# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests
- stage: special
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON
script: bash .travis.sh -t
# -------- STAGE 2: TESTS -----------
# on Mac, GCC
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
# on Linux, with quaternions ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON
script: bash .travis.sh -t
- stage: special
os: linux
compiler: gcc
env: PYTHON_VERSION=3
script: bash .travis.python.sh
- stage: special
os: osx
compiler: clang
env: PYTHON_VERSION=3
script: bash .travis.python.sh


@ -75,7 +75,7 @@ option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TB
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
@ -509,104 +509,104 @@ set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>=
# Print configuration variables
message(STATUS "===============================================================")
message(STATUS "================ Configuration Options ======================")
message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}")
message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}")
message(STATUS " CMake version : ${CMAKE_VERSION}")
message(STATUS " CMake generator : ${CMAKE_GENERATOR}")
message(STATUS " CMake build tool : ${CMAKE_BUILD_TOOL}")
print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}")
print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}")
print_config("CMake version" "${CMAKE_VERSION}")
print_config("CMake generator" "${CMAKE_GENERATOR}")
print_config("CMake build tool" "${CMAKE_BUILD_TOOL}")
message(STATUS "Build flags ")
print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ")
print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests")
print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'")
print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
if (DOXYGEN_FOUND)
print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ")
print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs")
endif()
print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ")
print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ")
print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries")
print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name")
if(GTSAM_UNSTABLE_AVAILABLE)
print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
message(STATUS " Build type : ${CMAKE_BUILD_TYPE}")
message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
print_config("Build type" "${CMAKE_BUILD_TYPE}")
print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
endif()
print_build_options_for_target(gtsam)
message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
if(GTSAM_USE_TBB)
message(STATUS " Use Intel TBB : Yes")
print_config("Use Intel TBB" "Yes")
elseif(TBB_FOUND)
message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
print_config("Use Intel TBB" "TBB found but GTSAM_WITH_TBB is disabled")
else()
message(STATUS " Use Intel TBB : TBB not found")
print_config("Use Intel TBB" "TBB not found")
endif()
if(GTSAM_USE_EIGEN_MKL)
message(STATUS " Eigen will use MKL : Yes")
print_config("Eigen will use MKL" "Yes")
elseif(MKL_FOUND)
message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
else()
message(STATUS " Eigen will use MKL : MKL not found")
print_config("Eigen will use MKL" "MKL not found")
endif()
if(GTSAM_USE_EIGEN_MKL_OPENMP)
message(STATUS " Eigen will use MKL and OpenMP : Yes")
print_config("Eigen will use MKL and OpenMP" "Yes")
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found")
elseif(OPENMP_FOUND)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
else()
message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
print_config("Eigen will use MKL and OpenMP" "OpenMP not found")
endif()
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}")
if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
message(STATUS " Cheirality exceptions enabled : YES")
print_config("Cheirality exceptions enabled" "YES")
else()
message(STATUS " Cheirality exceptions enabled : NO")
print_config("Cheirality exceptions enabled" "NO")
endif()
if(NOT MSVC AND NOT XCODE_VERSION)
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
message(STATUS " Build with ccache : Yes")
print_config("Build with ccache" "Yes")
elseif(CCACHE_FOUND)
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
else()
message(STATUS " Build with ccache : No")
print_config("Build with ccache" "No")
endif()
endif()
message(STATUS "Packaging flags ")
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
message(STATUS " CPack Generator : ${CPACK_GENERATOR}")
message(STATUS "Packaging flags")
print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}")
print_config("CPack Generator" "${CPACK_GENERATOR}")
message(STATUS "GTSAM flags ")
print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 allowed ")
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
print_enabled_config(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
print_enabled_config(${GTSAM_BUILD_WRAP} "Build Wrap ")
message(STATUS "MATLAB toolbox flags ")
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
message(STATUS "MATLAB toolbox flags")
print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
message(STATUS " MATLAB root : ${MATLAB_ROOT}")
message(STATUS " MEX binary : ${MEX_COMMAND}")
print_config("MATLAB root" "${MATLAB_ROOT}")
print_config("MEX binary" "${MEX_COMMAND}")
endif()
message(STATUS "Cython toolbox flags ")
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
print_enabled_config(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
if(GTSAM_INSTALL_CYTHON_TOOLBOX)
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
print_config("Python version" "${GTSAM_PYTHON_VERSION}")
endif()
message(STATUS "===============================================================")


@ -23,3 +23,5 @@ ordering library
- Included unmodified in gtsam/3rdparty/metis
- Licenced under Apache License v 2.0, provided in
gtsam/3rdparty/metis/LICENSE.txt
- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK.
- Licenced under MPL2, provided at https://github.com/yixuan/spectra


@ -1,5 +1,7 @@
# README - Georgia Tech Smoothing and Mapping Library
**As of August 1, develop is officially in "Pre 4.1" mode, and features deprecated in 4.0 were removed. Use the last 4.0.3 release if you need those features. However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag GTSAM_ALLOW_DEPRECATED_SINCE_V4**
## What is GTSAM?
GTSAM is a C++ library that implements smoothing and
@ -44,9 +46,12 @@ Optional prerequisites - used automatically if findable by CMake:
## GTSAM 4 Compatibility
GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. We also deprecate some legacy functionality and wrongly named methods, but by default the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 is enabled, allowing anyone to just pull V4 and compile. To build the python toolbox, however, you will have to explicitly disable that flag.
GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect.
GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 to use the deprecated methods.
GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag GTSAM_ALLOW_DEPRECATED_SINCE_V41 for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile.
Also, GTSAM 4 introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect.
## Wrappers

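The zero-initialization caveat in the README section above deserves a concrete illustration, since it does not trigger a compile or import error. A minimal Python sketch of the safe pattern, matching the gtsam.Point3(0, 0, 0) change made to the SFM example later in this commit (assumes the wrapped module gtsam from either toolbox is installed):

import gtsam

# Pass coordinates explicitly; relying on a zero-initializing default
# constructor for Point2/Point3 is deprecated.
origin = gtsam.Point3(0.0, 0.0, 0.0)
corner = gtsam.Point2(1.0, 2.0)
print(origin)
print(corner)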

@ -1,33 +0,0 @@
# version format
version: 4.0.3-{branch}-build{build}
os: Visual Studio 2019
clone_folder: c:\projects\gtsam
platform: x64
configuration: Release
environment:
CTEST_OUTPUT_ON_FAILURE: 1
BOOST_ROOT: C:/Libraries/boost_1_71_0
build_script:
- cd c:\projects\gtsam\build
# As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just
# check that parts of GTSAM build correctly:
#- cmake --build .
- cmake --build . --config Release --target gtsam
- cmake --build . --config Release --target gtsam_unstable
- cmake --build . --config Release --target wrap
#- cmake --build . --target check
- cmake --build . --config Release --target check.base
- cmake --build . --config Release --target check.base_unstable
- cmake --build . --config Release --target check.linear
before_build:
- cd c:\projects\gtsam
- mkdir build
- cd build
# Disable examples to avoid AppVeyor timeout
- cmake -G "Visual Studio 16 2019" .. -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF


@ -104,7 +104,14 @@ if(MSVC)
set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING /MD /O2 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
else()
# Common to all configurations, next for each configuration:
if (
((CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0.0)) OR
(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
)
set(flag_override_ -Wsuggest-override) # -Werror=suggest-override: Add again someday
endif()
set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON
-Wall # Enable common warnings
-fPIC # ensure proper code generation for shared libraries
@ -112,7 +119,7 @@ else()
$<$<CXX_COMPILER_ID:Clang>:-Wreturn-stack-address -Werror=return-stack-address> # Error: return local address
-Wreturn-type -Werror=return-type # Error on missing return()
-Wformat -Werror=format-security # Error on wrong printf() arguments
$<$<COMPILE_LANGUAGE:CXX>:-Wsuggest-override -Werror=suggest-override> # Enforce the use of the override keyword
$<$<COMPILE_LANGUAGE:CXX>:${flag_override_}> # Enforce the use of the override keyword
#
CACHE STRING "(User editable) Private compiler flags for all configurations.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG -g -fno-inline CACHE STRING "(User editable) Private compiler flags for Debug configuration.")


@ -1,14 +1,3 @@
# print configuration variables
# Usage:
#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
function(print_config_flag flag msg)
if (flag)
message(STATUS " ${msg}: Enabled")
else ()
message(STATUS " ${msg}: Disabled")
endif ()
endfunction()
# Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake
function(string_pad RESULT_NAME DESIRED_LENGTH VALUE)
string(LENGTH "${VALUE}" VALUE_LENGTH)
@ -26,6 +15,27 @@ endfunction()
set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.")
mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH)
# print configuration variables with automatic padding
# Usage:
# print_config(${GTSAM_BUILD_TESTS} "Build Tests")
function(print_config config msg)
string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}")
message(STATUS "${padded_config}: ${msg}")
endfunction()
# print configuration variable with enabled/disabled value
# Usage:
# print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests ")
function(print_enabled_config config msg)
string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}")
if (config)
message(STATUS "${padded_msg}: Enabled")
else ()
message(STATUS "${padded_msg}: Disabled")
endif ()
endfunction()
# Print " var: ${var}" padding with spaces as needed
function(print_padded variable_name)
string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${variable_name}")


@ -13,7 +13,7 @@ import unittest
import numpy as np
from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4,
FrobeniusWormholeFactor, SOn)
ShonanFactor3, SOn)
id = SO4()
v1 = np.array([0, 0, 0, 0.1, 0, 0])
@ -43,7 +43,7 @@ class TestFrobeniusFactorSO4(unittest.TestCase):
"""Test creation of a factor that calculates Shonan error."""
R1 = SO3.Expmap(v1[3:])
R2 = SO3.Expmap(v2[3:])
factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4)
factor = ShonanFactor3(1, 2, Rot3(R1.between(R2).matrix()), p=4)
I4 = SOn(4)
Q1 = I4.retract(v1)
Q2 = I4.retract(v2)


@ -155,7 +155,6 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None,
axes.set_xlabel(axis_labels[0])
axes.set_ylabel(axis_labels[1])
axes.set_zlabel(axis_labels[2])
return fig


@ -30,8 +30,8 @@ class GroundTruth:
def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4):
self.K = K
self.cameras = [Pose3()] * nrCameras
self.points = [Point3()] * nrPoints
self.cameras = [gtsam.Pose3()] * nrCameras
self.points = [gtsam.Point3(0, 0, 0)] * nrPoints
def print_(self, s=""):
print(s)
@ -99,11 +99,11 @@ def generate_data(options):
r = 40
for i in range(options.nrCameras):
theta = i * 2 * np.pi / options.nrCameras
t = Point3(r * np.cos(theta), r * np.sin(theta), height)
truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t,
Point3(),
Point3(0, 0, 1),
truth.K)
t = gtsam.Point3(r * np.cos(theta), r * np.sin(theta), height)
truth.cameras[i] = gtsam.SimpleCamera.Lookat(t,
gtsam.Point3(0, 0, 0),
gtsam.Point3(0, 0, 1),
truth.K)
# Create measurements
for j in range(nrPoints):
# All landmarks seen in every frame


@ -1,6 +1,6 @@
VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109
VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809
VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403
EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063
VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862
VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738
EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0


@ -1,16 +1,16 @@
VERTEX_SE3:QUAT 8646911284551352320 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162
VERTEX_SE3:QUAT 8646911284551352321 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508
VERTEX_SE3:QUAT 8646911284551352322 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10
VERTEX_SE3:QUAT 8646911284551352323 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508
VERTEX_SE3:QUAT 8646911284551352324 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162
VERTEX_SE3:QUAT 8646911284551352325 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567
VERTEX_SE3:QUAT 8646911284551352326 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412
VERTEX_SE3:QUAT 8646911284551352327 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567
VERTEX_TRACKXYZ 7782220156096217088 10 10 10
VERTEX_TRACKXYZ 7782220156096217089 -10 10 10
VERTEX_TRACKXYZ 7782220156096217090 -10 -10 10
VERTEX_TRACKXYZ 7782220156096217091 10 -10 10
VERTEX_TRACKXYZ 7782220156096217092 10 10 -10
VERTEX_TRACKXYZ 7782220156096217093 -10 10 -10
VERTEX_TRACKXYZ 7782220156096217094 -10 -10 -10
VERTEX_TRACKXYZ 7782220156096217095 10 -10 -10
VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162
VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508
VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10
VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508
VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162
VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567
VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412
VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567
VERTEX_TRACKXYZ 0 10 10 10
VERTEX_TRACKXYZ 1 -10 10 10
VERTEX_TRACKXYZ 2 -10 -10 10
VERTEX_TRACKXYZ 3 10 -10 10
VERTEX_TRACKXYZ 4 10 10 -10
VERTEX_TRACKXYZ 5 -10 10 -10
VERTEX_TRACKXYZ 6 -10 -10 -10
VERTEX_TRACKXYZ 7 10 -10 -10

gtsam.h

@ -2369,6 +2369,7 @@ virtual class NonlinearOptimizer {
double error() const;
int iterations() const;
gtsam::Values values() const;
gtsam::NonlinearFactorGraph graph() const;
gtsam::GaussianFactorGraph* iterate() const;
};
@ -2568,10 +2569,12 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor {
void serialize() const;
};
#include <gtsam/nonlinear/NonlinearEquality.h>
template<T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2, gtsam::imuBias::ConstantBias}>
template <T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2,
gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose2,
gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera,
gtsam::SimpleCamera, gtsam::PinholeCameraCal3_S2,
gtsam::imuBias::ConstantBias}>
virtual class NonlinearEquality : gtsam::NoiseModelFactor {
// Constructor - forces exact evaluation
NonlinearEquality(size_t j, const T& feasible);
@ -2582,7 +2585,6 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor {
void serialize() const;
};
#include <gtsam/sam/RangeFactor.h>
template<POSE, POINT>
virtual class RangeFactor : gtsam::NoiseModelFactor {
@ -2853,7 +2855,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor {
};
#include <gtsam/slam/FrobeniusFactor.h>
gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel(
gtsam::noiseModel::Isotropic* ConvertNoiseModel(
gtsam::noiseModel::Base* model, size_t d);
template<T = {gtsam::SO3, gtsam::SO4}>
@ -2872,12 +2874,144 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {
Vector evaluateError(const T& R1, const T& R2);
};
virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor {
FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
#include <gtsam/sfm/ShonanFactor.h>
virtual class ShonanFactor3 : gtsam::NoiseModelFactor {
ShonanFactor(size_t key1, size_t key2, const gtsam::Rot3 &R12,
size_t p);
FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
size_t p, gtsam::noiseModel::Base* model);
Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2);
ShonanFactor(size_t key1, size_t key2, const gtsam::Rot3 &R12,
size_t p, gtsam::noiseModel::Base *model);
Vector evaluateError(const gtsam::SOn &Q1, const gtsam::SOn &Q2);
};
#include <gtsam/sfm/BinaryMeasurement.h>
template<T>
class BinaryMeasurement {
BinaryMeasurement(size_t key1, size_t key2, const T& measured,
const gtsam::noiseModel::Base* model);
size_t key1() const;
size_t key2() const;
T measured() const;
};
typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3;
typedef gtsam::BinaryMeasurement<gtsam::Rot3> BinaryMeasurementRot3;
#include <gtsam/sfm/ShonanAveraging.h>
// TODO(frank): copy/pasta below until we have integer template parameters in wrap!
class ShonanAveragingParameters2 {
ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm);
ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, string method);
gtsam::LevenbergMarquardtParams getLMParams() const;
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot2& value);
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
double getKarcherWeight();
void setGaugesWeight(double value);
double getGaugesWeight();
};
class ShonanAveragingParameters3 {
ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm);
ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, string method);
gtsam::LevenbergMarquardtParams getLMParams() const;
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot3& value);
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
double getKarcherWeight();
void setGaugesWeight(double value);
double getGaugesWeight();
};
class ShonanAveraging2 {
ShonanAveraging2(string g2oFile);
ShonanAveraging2(string g2oFile,
const gtsam::ShonanAveragingParameters2 &parameters);
// Query properties
size_t nrUnknowns() const;
size_t nrMeasurements() const;
gtsam::Rot2 measured(size_t i);
gtsam::KeyVector keys(size_t i);
// Matrix API (advanced use, debugging)
Matrix denseD() const;
Matrix denseQ() const;
Matrix denseL() const;
// Matrix computeLambda_(Matrix S) const;
Matrix computeLambda_(const gtsam::Values& values) const;
Matrix computeA_(const gtsam::Values& values) const;
double computeMinEigenValue(const gtsam::Values& values) const;
gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values,
const Vector& minEigenVector, double minEigenValue) const;
// Advanced API
gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const;
double costAt(size_t p, const gtsam::Values& values) const;
pair<double, Vector> computeMinEigenVector(const gtsam::Values& values) const;
bool checkOptimality(const gtsam::Values& values) const;
gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial);
// gtsam::Values tryOptimizingAt(size_t p) const;
gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const;
gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const;
gtsam::Values roundSolution(const gtsam::Values& values) const;
// Basic API
double cost(const gtsam::Values& values) const;
gtsam::Values initializeRandomly() const;
pair<gtsam::Values, double> run(const gtsam::Values& initial, size_t min_p, size_t max_p) const;
};
class ShonanAveraging3 {
ShonanAveraging3(string g2oFile);
ShonanAveraging3(string g2oFile,
const gtsam::ShonanAveragingParameters3 &parameters);
// TODO(frank): deprecate once we land pybind wrapper
ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors);
ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors,
const gtsam::ShonanAveragingParameters3 &parameters);
// Query properties
size_t nrUnknowns() const;
size_t nrMeasurements() const;
gtsam::Rot3 measured(size_t i);
gtsam::KeyVector keys(size_t i);
// Matrix API (advanced use, debugging)
Matrix denseD() const;
Matrix denseQ() const;
Matrix denseL() const;
// Matrix computeLambda_(Matrix S) const;
Matrix computeLambda_(const gtsam::Values& values) const;
Matrix computeA_(const gtsam::Values& values) const;
double computeMinEigenValue(const gtsam::Values& values) const;
gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values,
const Vector& minEigenVector, double minEigenValue) const;
// Advanced API
gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const;
double costAt(size_t p, const gtsam::Values& values) const;
pair<double, Vector> computeMinEigenVector(const gtsam::Values& values) const;
bool checkOptimality(const gtsam::Values& values) const;
gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial);
// gtsam::Values tryOptimizingAt(size_t p) const;
gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const;
gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const;
gtsam::Values roundSolution(const gtsam::Values& values) const;
// Basic API
double cost(const gtsam::Values& values) const;
gtsam::Values initializeRandomly() const;
pair<gtsam::Values, double> run(const gtsam::Values& initial, size_t min_p, size_t max_p) const;
};
//*************************************************************************
@ -2996,6 +3130,7 @@ class PreintegratedImuMeasurements {
void resetIntegrationAndSetBias(const gtsam::imuBias::ConstantBias& biasHat);
Matrix preintMeasCov() const;
Vector preintegrated() const;
double deltaTij() const;
gtsam::Rot3 deltaRij() const;
Vector deltaPij() const;

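The ShonanAveraging2/3 classes declared in the gtsam.h interface above become callable from the Python wrappers once the interface is regenerated. A rough usage sketch under that assumption (the g2o filename is a placeholder, and the exact module spelling may differ slightly between the cython and pybind toolboxes):

import gtsam

# Rotation averaging with the newly wrapped Shonan interface; ShonanAveraging3
# reads 3D relative pose measurements from a g2o file (name is hypothetical).
params = gtsam.ShonanAveragingParameters3(gtsam.LevenbergMarquardtParams())
shonan = gtsam.ShonanAveraging3("pose3_graph.g2o", params)

initial = shonan.initializeRandomly()
# Relax over SO(p) for p = 3..10; returns the estimated rotations and the
# minimum eigenvalue used as an optimality certificate.
result, min_eigenvalue = shonan.run(initial, 3, 10)
print("cost at solution:", shonan.cost(result))
print("certificate (min eigenvalue):", min_eigenvalue)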

@ -1,7 +1,8 @@
# install CCOLAMD headers
# install CCOLAMD and SuiteSparse_config headers
install(FILES CCOLAMD/Include/ccolamd.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/CCOLAMD)
install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/SuiteSparse_config)
# install Eigen unless System Eigen is used
if(NOT GTSAM_USE_SYSTEM_EIGEN)
# Find plain .h files
file(GLOB_RECURSE eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/Eigen/*.h")

gtsam/3rdparty/Spectra/GenEigsBase.h vendored Normal file

@ -0,0 +1,482 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_BASE_H
#define GEN_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow, std::sqrt
#include <algorithm> // std::min, std::copy
#include <complex> // std::complex, std::conj, std::norm, std::abs
#include <stdexcept> // std::invalid_argument
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/DoubleShiftQR.h"
#include "LinAlg/UpperHessenbergEigen.h"
#include "LinAlg/Arnoldi.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This is the base class for general eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as GenEigsSolver and GenEigsRealShiftSolver.
///
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class GenEigsBase
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
typedef Eigen::Array<bool, Eigen::Dynamic, 1> BoolArray;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Vector> MapConstVec;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
typedef ArnoldiOp<Scalar, OpType, BOpType> ArnoldiOpType;
typedef Arnoldi<Scalar, ArnoldiOpType> ArnoldiFac;
protected:
// clang-format off
OpType* m_op; // object to conduct matrix operation,
// e.g. matrix-vector product
const Index m_n; // dimension of matrix A
const Index m_nev; // number of eigenvalues requested
const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method
Index m_nmatop; // number of matrix operations called
Index m_niter; // number of restarting iterations
ArnoldiFac m_fac; // Arnoldi factorization
ComplexVector m_ritz_val; // Ritz values
ComplexMatrix m_ritz_vec; // Ritz vectors
ComplexVector m_ritz_est; // last row of m_ritz_vec
private:
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
int m_info; // status of the computation
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
const Scalar m_eps23; // m_eps^(2/3), used to test the convergence
// clang-format on
// Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part
// Complex Ritz values have exact conjugate pairs
// So we use exact tests here
static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); }
static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); }
// Implicitly restarted Arnoldi factorization
void restart(Index k)
{
using std::norm;
if (k >= m_ncv)
return;
DoubleShiftQR<Scalar> decomp_ds(m_ncv);
UpperHessenbergQR<Scalar> decomp_hb(m_ncv);
Matrix Q = Matrix::Identity(m_ncv, m_ncv);
for (Index i = k; i < m_ncv; i++)
{
if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1]))
{
// H - mu * I = Q1 * R1
// H <- R1 * Q1 + mu * I = Q1' * H * Q1
// H - conj(mu) * I = Q2 * R2
// H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2
//
// (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R
const Scalar s = Scalar(2) * m_ritz_val[i].real();
const Scalar t = norm(m_ritz_val[i]);
decomp_ds.compute(m_fac.matrix_H(), s, t);
// Q -> Q * Qi
decomp_ds.apply_YQ(Q);
// H -> Q'HQ
// Matrix Q = Matrix::Identity(m_ncv, m_ncv);
// decomp_ds.apply_YQ(Q);
// m_fac_H = Q.transpose() * m_fac_H * Q;
m_fac.compress_H(decomp_ds);
i++;
}
else
{
// QR decomposition of H - mu * I, mu is real
decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real());
// Q -> Q * Qi
decomp_hb.apply_YQ(Q);
// H -> Q'HQ = RQ + mu * I
m_fac.compress_H(decomp_hb);
}
}
m_fac.compress_V(Q);
m_fac.factorize_from(k, m_ncv, m_nmatop);
retrieve_ritzpair();
}
// Calculates the number of converged Ritz values
Index num_converged(Scalar tol)
{
// thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23);
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
// Converged "wanted" Ritz values
m_ritz_conv = (resid < thresh);
return m_ritz_conv.cast<Index>().sum();
}
// Returns the adjusted nev for restarting
Index nev_adjusted(Index nconv)
{
using std::abs;
Index nev_new = m_nev;
for (Index i = m_nev; i < m_ncv; i++)
if (abs(m_ritz_est[i]) < m_near_0)
nev_new++;
// Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK
nev_new += std::min(nconv, (m_ncv - nev_new) / 2);
if (nev_new == 1 && m_ncv >= 6)
nev_new = m_ncv / 2;
else if (nev_new == 1 && m_ncv > 3)
nev_new = 2;
if (nev_new > m_ncv - 2)
nev_new = m_ncv - 2;
// Increase nev by one if ritz_val[nev - 1] and
// ritz_val[nev] are conjugate pairs
if (is_complex(m_ritz_val[nev_new - 1]) &&
is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new]))
{
nev_new++;
}
return nev_new;
}
// Retrieves and sorts Ritz values and Ritz vectors
void retrieve_ritzpair()
{
UpperHessenbergEigen<Scalar> decomp(m_fac.matrix_H());
const ComplexVector& evals = decomp.eigenvalues();
ComplexMatrix evecs = decomp.eigenvectors();
SortEigenvalue<Complex, SelectionRule> sorting(evals.data(), evals.size());
std::vector<int> ind = sorting.index();
// Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
for (Index i = 0; i < m_ncv; i++)
{
m_ritz_val[i] = evals[ind[i]];
m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
}
for (Index i = 0; i < m_nev; i++)
{
m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
}
}
protected:
// Sorts the first nev Ritz pairs in the specified order
// This is used to return the final results
virtual void sort_ritzpair(int sort_rule)
{
// First make sure that we have a valid index vector
SortEigenvalue<Complex, LARGEST_MAGN> sorting(m_ritz_val.data(), m_nev);
std::vector<int> ind = sorting.index();
switch (sort_rule)
{
case LARGEST_MAGN:
break;
case LARGEST_REAL:
{
SortEigenvalue<Complex, LARGEST_REAL> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case LARGEST_IMAG:
{
SortEigenvalue<Complex, LARGEST_IMAG> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_MAGN:
{
SortEigenvalue<Complex, SMALLEST_MAGN> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_REAL:
{
SortEigenvalue<Complex, SMALLEST_REAL> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_IMAG:
{
SortEigenvalue<Complex, SMALLEST_IMAG> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
default:
throw std::invalid_argument("unsupported sorting rule");
}
ComplexVector new_ritz_val(m_ncv);
ComplexMatrix new_ritz_vec(m_ncv, m_nev);
BoolArray new_ritz_conv(m_nev);
for (Index i = 0; i < m_nev; i++)
{
new_ritz_val[i] = m_ritz_val[ind[i]];
new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
new_ritz_conv[i] = m_ritz_conv[ind[i]];
}
m_ritz_val.swap(new_ritz_val);
m_ritz_vec.swap(new_ritz_vec);
m_ritz_conv.swap(new_ritz_conv);
}
public:
/// \cond
GenEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) :
m_op(op),
m_n(m_op->rows()),
m_nev(nev),
m_ncv(ncv > m_n ? m_n : ncv),
m_nmatop(0),
m_niter(0),
m_fac(ArnoldiOpType(op, Bop), m_ncv),
m_info(NOT_COMPUTED),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3))
{
if (nev < 1 || nev > m_n - 2)
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix");
if (ncv < nev + 2 || ncv > m_n)
throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix");
}
///
/// Virtual destructor
///
virtual ~GenEigsBase() {}
/// \endcond
///
/// Initializes the solver by providing an initial residual vector.
///
/// \param init_resid Pointer to the initial residual vector.
///
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
/// to find eigenvalues. This function allows the user to provide the initial
/// residual vector.
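///
/// A minimal sketch of how this overload may be called (it assumes `eigs` is an
/// already constructed solver and `n` is the matrix size; both names are
/// illustrative only):
/// \code{.cpp}
/// Eigen::VectorXd v0 = Eigen::VectorXd::Constant(n, 1.0);
/// eigs.init(v0.data());
/// \endcode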
///
void init(const Scalar* init_resid)
{
// Reset all matrices/vectors to zero
m_ritz_val.resize(m_ncv);
m_ritz_vec.resize(m_ncv, m_nev);
m_ritz_est.resize(m_ncv);
m_ritz_conv.resize(m_nev);
m_ritz_val.setZero();
m_ritz_vec.setZero();
m_ritz_est.setZero();
m_ritz_conv.setZero();
m_nmatop = 0;
m_niter = 0;
// Initialize the Arnoldi factorization
MapConstVec v0(init_resid, m_n);
m_fac.init(v0, m_nmatop);
}
///
/// Initializes the solver by providing a random initial residual vector.
///
/// This overloaded function generates a random initial residual vector
/// (with a fixed random seed) for the algorithm. Elements in the vector
/// follow independent Uniform(-0.5, 0.5) distributions.
///
void init()
{
SimpleRandom<Scalar> rng(0);
Vector init_resid = rng.random_vec(m_n);
init(init_resid.data());
}
///
/// Conducts the major computation procedure.
///
/// \param maxit Maximum number of iterations allowed in the algorithm.
/// \param tol Precision parameter for the calculated eigenvalues.
/// \param sort_rule Rule to sort the eigenvalues and eigenvectors.
/// Supported values are
/// `Spectra::LARGEST_MAGN`, `Spectra::LARGEST_REAL`,
/// `Spectra::LARGEST_IMAG`, `Spectra::SMALLEST_MAGN`,
/// `Spectra::SMALLEST_REAL` and `Spectra::SMALLEST_IMAG`.
/// For example, `LARGEST_MAGN` indicates that eigenvalues
/// with the largest magnitude come first.
/// Note that this argument is only used to
/// **sort** the final result, and the **selection** rule
/// (e.g. selecting the largest or smallest eigenvalues in the
/// full spectrum) is specified by the template parameter
/// `SelectionRule` of GenEigsSolver.
///
/// \return Number of converged eigenvalues.
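///
/// A minimal sketch of a typical call sequence (assuming `eigs` has already been
/// constructed and initialized, as in the examples in GenEigsSolver.h; the
/// parameter values are illustrative only):
/// \code{.cpp}
/// int nconv = eigs.compute(500, 1e-10, LARGEST_MAGN);
/// if (eigs.info() == SUCCESSFUL)
///     std::cout << nconv << " eigenvalue(s) converged" << std::endl;
/// \endcode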
///
Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_MAGN)
{
// The m-step Arnoldi factorization
m_fac.factorize_from(1, m_ncv, m_nmatop);
retrieve_ritzpair();
// Restarting
Index i, nconv = 0, nev_adj;
for (i = 0; i < maxit; i++)
{
nconv = num_converged(tol);
if (nconv >= m_nev)
break;
nev_adj = nev_adjusted(nconv);
restart(nev_adj);
}
// Sorting results
sort_ritzpair(sort_rule);
m_niter += i + 1;
m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING;
return std::min(m_nev, nconv);
}
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Returns the number of iterations used in the computation.
///
Index num_iterations() const { return m_niter; }
///
/// Returns the number of matrix operations used in the computation.
///
Index num_operations() const { return m_nmatop; }
///
/// Returns the converged eigenvalues.
///
/// \return A complex-valued vector containing the eigenvalues.
/// Returned vector type will be `Eigen::Vector<std::complex<Scalar>, ...>`, depending on
/// the template parameter `Scalar`.
///
ComplexVector eigenvalues() const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
ComplexVector res(nconv);
if (!nconv)
return res;
Index j = 0;
for (Index i = 0; i < m_nev; i++)
{
if (m_ritz_conv[i])
{
res[j] = m_ritz_val[i];
j++;
}
}
return res;
}
///
/// Returns the eigenvectors associated with the converged eigenvalues.
///
/// \param nvec The number of eigenvectors to return.
///
/// \return A complex-valued matrix containing the eigenvectors.
/// Returned matrix type will be `Eigen::Matrix<std::complex<Scalar>, ...>`,
/// depending on the template parameter `Scalar`.
///
ComplexMatrix eigenvectors(Index nvec) const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
nvec = std::min(nvec, nconv);
ComplexMatrix res(m_n, nvec);
if (!nvec)
return res;
ComplexMatrix ritz_vec_conv(m_ncv, nvec);
Index j = 0;
for (Index i = 0; i < m_nev && j < nvec; i++)
{
if (m_ritz_conv[i])
{
ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
j++;
}
}
res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
return res;
}
///
/// Returns all converged eigenvectors.
///
ComplexMatrix eigenvectors() const
{
return eigenvectors(m_nev);
}
};
} // namespace Spectra
#endif // GEN_EIGS_BASE_H

157
gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h vendored Normal file
View File

@ -0,0 +1,157 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#define GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenComplexShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a complex shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the shifted-and-inverted eigenvalues.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the DenseGenComplexShiftSolve wrapper class, or define their
/// own that implements all the public member functions as in
/// DenseGenComplexShiftSolve.
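///
/// A usage sketch, modeled on the GenEigsSolver example (the matrix `M` and all
/// parameter values below are illustrative only):
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/GenEigsComplexShiftSolver.h>
/// // <Spectra/MatOp/DenseGenComplexShiftSolve.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
///     Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
///
///     // Matrix operation object computing Re{(A - sigma * I)^{-1} * v}
///     DenseGenComplexShiftSolve<double> op(M);
///
///     // Request 3 eigenvalues near the complex shift sigma = 1 + 0.5i
///     GenEigsComplexShiftSolver< double, LARGEST_MAGN, DenseGenComplexShiftSolve<double> >
///         eigs(&op, 3, 6, 1.0, 0.5);
///
///     eigs.init();
///     eigs.compute();
///
///     if (eigs.info() == SUCCESSFUL)
///         std::cout << "Eigenvalues:\n" << eigs.eigenvalues() << std::endl;
///
///     return 0;
/// }
/// \endcode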
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseGenComplexShiftSolve<double> >
class GenEigsComplexShiftSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
const Scalar m_sigmar;
const Scalar m_sigmai;
// First transform back the Ritz values, and then sort
void sort_ritzpair(int sort_rule)
{
using std::abs;
using std::sqrt;
using std::norm;
// The eigenvalues we get from the iteration are
// nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma)))
// so the eigenvalues of the original problem are
// lambda = sigmar + (1 \pm sqrt(1 - 4 * nu^2 * sigmai^2)) / (2 * nu)
// We need to pick the correct root
// Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj
// and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj
// where r is any shift value.
// We can use this identity to determine lambdaj
//
// op(v) computes Re(inv(A - r * I) * v) for any real v
// If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj),
// then op(vj) = op(a) + op(b) * i
// By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine
// which one is the correct root
// Select a random shift value
SimpleRandom<Scalar> rng(0);
const Scalar shiftr = rng.random() * m_sigmar + rng.random();
const Complex shift = Complex(shiftr, Scalar(0));
this->m_op->set_shift(shiftr, Scalar(0));
// Calculate inv(A - r * I) * vj
Vector v_real(this->m_n), v_imag(this->m_n), OPv_real(this->m_n), OPv_imag(this->m_n);
const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
for (Index i = 0; i < this->m_nev; i++)
{
v_real.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).real();
v_imag.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).imag();
this->m_op->perform_op(v_real.data(), OPv_real.data());
this->m_op->perform_op(v_imag.data(), OPv_imag.data());
// Two roots computed from the quadratic equation
const Complex nu = this->m_ritz_val[i];
const Complex root_part1 = m_sigmar + Scalar(0.5) / nu;
const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu;
const Complex root1 = root_part1 + root_part2;
const Complex root2 = root_part1 - root_part2;
// Test roots
Scalar err1 = Scalar(0), err2 = Scalar(0);
for (int k = 0; k < this->m_n; k++)
{
const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift);
const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift);
const Complex OPv = Complex(OPv_real[k], OPv_imag[k]);
err1 += norm(OPv - rhs1);
err2 += norm(OPv - rhs2);
}
const Complex lambdaj = (err1 < err2) ? root1 : root2;
this->m_ritz_val[i] = lambdaj;
if (abs(Eigen::numext::imag(lambdaj)) > eps)
{
this->m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj);
i++;
}
else
{
this->m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0));
}
}
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
}
public:
///
/// Constructor to create an eigen solver object using the shift-and-invert mode.
///
/// \param op Pointer to the matrix operation object. This class should implement
/// the complex shift-solve operation of \f$A\f$: calculating
/// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. Users could either
/// create the object from the DenseGenComplexShiftSolve wrapper class, or
/// define their own that implements all the public member functions
/// as in DenseGenComplexShiftSolve.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
/// and it is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
/// \param sigmar The real part of the shift.
/// \param sigmai The imaginary part of the shift.
///
GenEigsComplexShiftSolver(OpType* op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) :
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
m_sigmar(sigmar), m_sigmai(sigmai)
{
this->m_op->set_shift(m_sigmar, m_sigmai);
}
};
} // namespace Spectra
#endif // GEN_EIGS_COMPLEX_SHIFT_SOLVER_H

View File

@ -0,0 +1,89 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_REAL_SHIFT_SOLVER_H
#define GEN_EIGS_REAL_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenRealShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a real shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the shifted-and-inverted eigenvalues.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenRealShiftSolve and
/// SparseGenRealShiftSolve, or define their
/// own that implements all the public member functions as in
/// DenseGenRealShiftSolve.
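///
/// A usage sketch, modeled on the GenEigsSolver example (the matrix `M` and all
/// parameter values below are illustrative only):
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/GenEigsRealShiftSolver.h>
/// // <Spectra/MatOp/DenseGenRealShiftSolve.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
///     Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
///
///     // Matrix operation object computing (A - sigma * I)^{-1} * v
///     DenseGenRealShiftSolve<double> op(M);
///
///     // Request 3 eigenvalues near the real shift sigma = 1.0
///     GenEigsRealShiftSolver< double, LARGEST_MAGN, DenseGenRealShiftSolve<double> >
///         eigs(&op, 3, 6, 1.0);
///
///     eigs.init();
///     eigs.compute();
///
///     if (eigs.info() == SUCCESSFUL)
///         std::cout << "Eigenvalues:\n" << eigs.eigenvalues() << std::endl;
///
///     return 0;
/// }
/// \endcode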
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseGenRealShiftSolve<double> >
class GenEigsRealShiftSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef std::complex<Scalar> Complex;
typedef Eigen::Array<Complex, Eigen::Dynamic, 1> ComplexArray;
const Scalar m_sigma;
// First transform back the Ritz values, and then sort
void sort_ritzpair(int sort_rule)
{
// The eigenvalues we get from the iteration are nu = 1 / (lambda - sigma),
// so the eigenvalues of the original problem are lambda = 1 / nu + sigma
ComplexArray ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma;
this->m_ritz_val.head(this->m_nev) = ritz_val_org;
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
}
public:
///
/// Constructor to create an eigen solver object using the shift-and-invert mode.
///
/// \param op Pointer to the matrix operation object. This class should implement
/// the shift-solve operation of \f$A\f$: calculating
/// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseGenRealShiftSolve, or
/// define their own that implements all the public member functions
/// as in DenseGenRealShiftSolve.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
/// and it is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
/// \param sigma The real-valued shift.
///
GenEigsRealShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) :
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
m_sigma(sigma)
{
this->m_op->set_shift(m_sigma);
}
};
} // namespace Spectra
#endif // GEN_EIGS_REAL_SHIFT_SOLVER_H

158
gtsam/3rdparty/Spectra/GenEigsSolver.h vendored Normal file
View File

@ -0,0 +1,158 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEN_EIGS_SOLVER_H
#define GEN_EIGS_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix.
///
/// Most of the background information documented in the SymEigsSolver class
/// also applies to the GenEigsSolver class here, except that the eigenvalues
/// and eigenvectors of a general matrix can now be complex-valued.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenMatProd and
/// SparseGenMatProd, or define their
/// own that implements all the public member functions as in
/// DenseGenMatProd.
///
/// An example that illustrates the usage of GenEigsSolver is given below:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/GenEigsSolver.h>
/// // <Spectra/MatOp/DenseGenMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
///
/// // Construct matrix operation object using the wrapper class
/// DenseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest
/// // (in magnitude, or norm) three eigenvalues
/// GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd<double> > eigs(&op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if(eigs.info() == SUCCESSFUL)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And also an example for sparse matrices:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/GenEigsSolver.h>
/// #include <Spectra/MatOp/SparseGenMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal,
/// // and 3 on the above-main subdiagonal
/// const int n = 10;
/// Eigen::SparseMatrix<double> M(n, n);
/// M.reserve(Eigen::VectorXi::Constant(n, 3));
/// for(int i = 0; i < n; i++)
/// {
/// M.insert(i, i) = 1.0;
/// if(i > 0)
/// M.insert(i - 1, i) = 3.0;
/// if(i < n - 1)
/// M.insert(i + 1, i) = 2.0;
/// }
///
/// // Construct matrix operation object using the wrapper class SparseGenMatProd
/// SparseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// GenEigsSolver< double, LARGEST_MAGN, SparseGenMatProd<double> > eigs(&op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if(eigs.info() == SUCCESSFUL)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseGenMatProd<double> >
class GenEigsSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the matrix operation object, which should implement
/// the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseGenMatProd, or
/// define their own that implements all the public member functions
/// as in DenseGenMatProd.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
/// and it is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
///
GenEigsSolver(OpType* op, Index nev, Index ncv) :
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv)
{}
};
} // namespace Spectra
#endif // GEN_EIGS_SOLVER_H

284
gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h vendored Normal file
View File

@ -0,0 +1,284 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef ARNOLDI_H
#define ARNOLDI_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <stdexcept> // std::invalid_argument
#include <sstream> // std::stringstream
#include "../MatOp/internal/ArnoldiOp.h"
#include "../Util/TypeTraits.h"
#include "../Util/SimpleRandom.h"
#include "UpperHessenbergQR.h"
#include "DoubleShiftQR.h"
namespace Spectra {
// Arnoldi factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated of dimension m, so the maximum value of k is m
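// In exact arithmetic V is B-orthonormal (V'BV = I) and the residual satisfies
// V'Bf = 0; factorize_from() below maintains this numerically via iterative
// re-orthogonalization.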
template <typename Scalar, typename ArnoldiOpType>
class Arnoldi
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Matrix> MapConstMat;
typedef Eigen::Map<const Vector> MapConstVec;
protected:
// clang-format off
ArnoldiOpType m_op; // Operators for the Arnoldi factorization
const Index m_n; // dimension of A
const Index m_m; // maximum dimension of subspace V
Index m_k; // current dimension of subspace V
Matrix m_fac_V; // V matrix in the Arnoldi factorization
Matrix m_fac_H; // H matrix in the Arnoldi factorization
Vector m_fac_f; // residual in the Arnoldi factorization
Scalar m_beta; // ||f||, B-norm of f
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
// clang-format on
// Given orthonormal basis functions V, find a nonzero vector f such that V'Bf = 0
// Assume that f has been properly allocated
void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm)
{
using std::sqrt;
const Scalar thresh = m_eps * sqrt(Scalar(m_n));
Vector Vf(V.cols());
for (Index iter = 0; iter < 5; iter++)
{
// Randomly generate a new vector and orthogonalize it against V
SimpleRandom<Scalar> rng(seed + 123 * iter);
f.noalias() = rng.random_vec(m_n);
// f <- f - V * V'Bf, so that f is orthogonal to V in B-norm
m_op.trans_product(V, f, Vf);
f.noalias() -= V * Vf;
// fnorm <- ||f||
fnorm = m_op.norm(f);
// If fnorm is too close to zero, we try a new random vector,
// otherwise return the result
if (fnorm >= thresh)
return;
}
}
public:
Arnoldi(const ArnoldiOpType& op, Index m) :
m_op(op), m_n(op.rows()), m_m(m), m_k(0),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon())
{}
virtual ~Arnoldi() {}
// Const-reference to internal structures
const Matrix& matrix_V() const { return m_fac_V; }
const Matrix& matrix_H() const { return m_fac_H; }
const Vector& vector_f() const { return m_fac_f; }
Scalar f_norm() const { return m_beta; }
Index subspace_dim() const { return m_k; }
// Initialize with an operator and an initial vector
void init(MapConstVec& v0, Index& op_counter)
{
m_fac_V.resize(m_n, m_m);
m_fac_H.resize(m_m, m_m);
m_fac_f.resize(m_n);
m_fac_H.setZero();
// Verify the initial vector
const Scalar v0norm = m_op.norm(v0);
if (v0norm < m_near_0)
throw std::invalid_argument("initial residual vector cannot be zero");
// Points to the first column of V
MapVec v(m_fac_V.data(), m_n);
// Normalize
v.noalias() = v0 / v0norm;
// Compute H and f
Vector w(m_n);
m_op.perform_op(v.data(), w.data());
op_counter++;
m_fac_H(0, 0) = m_op.inner_product(v, w);
m_fac_f.noalias() = w - v * m_fac_H(0, 0);
// In some cases f is zero in exact arithmetic, but due to rounding errors
// it may contain tiny fluctuations. When this happens, we force f to be zero
if (m_fac_f.cwiseAbs().maxCoeff() < m_eps)
{
m_fac_f.setZero();
m_beta = Scalar(0);
}
else
{
m_beta = m_op.norm(m_fac_f);
}
// Indicate that this is a step-1 factorization
m_k = 1;
}
// Arnoldi factorization starting from step-k
virtual void factorize_from(Index from_k, Index to_m, Index& op_counter)
{
using std::sqrt;
if (to_m <= from_k)
return;
if (from_k > m_k)
{
std::stringstream msg;
msg << "Arnoldi: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")";
throw std::invalid_argument(msg.str());
}
const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));
// Pre-allocate vectors
Vector Vf(to_m);
Vector w(m_n);
// Keep the upper-left k x k submatrix of H and set other elements to 0
m_fac_H.rightCols(m_m - from_k).setZero();
m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();
for (Index i = from_k; i <= to_m - 1; i++)
{
bool restart = false;
// If beta = 0, then the next V is not full rank
// We need to generate a new residual vector that is orthogonal
// to the current V, which we call a restart
if (m_beta < m_near_0)
{
MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns
expand_basis(V, 2 * i, m_fac_f, m_beta);
restart = true;
}
// v <- f / ||f||
m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column
// Note that H[i+1, i] equals the unrestarted beta
m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
// w <- A * v, v = m_fac_V.col(i)
m_op.perform_op(&m_fac_V(0, i), w.data());
op_counter++;
const Index i1 = i + 1;
// First i+1 columns of V
MapConstMat Vs(m_fac_V.data(), m_n, i1);
// h = m_fac_H(0:i, i)
MapVec h(&m_fac_H(0, i), i1);
// h <- V'Bw
m_op.trans_product(Vs, w, h);
// f <- w - V * h
m_fac_f.noalias() = w - Vs * h;
m_beta = m_op.norm(m_fac_f);
if (m_beta > Scalar(0.717) * m_op.norm(h))
continue;
// f/||f|| is going to be the next column of V, so we need to test
// whether V'B(f/||f||) ~= 0
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
// If not, iteratively correct the residual
int count = 0;
while (count < 5 && ortho_err > m_eps * m_beta)
{
// There is an edge case: when beta=||f|| is close to zero, f consists mostly
// of rounding-error noise, so the test [ortho_err < eps * beta] is very
// likely to fail. In particular, if beta=0, then the test is guaranteed to fail.
// Hence when this happens, we force f to be zero, and then restart in the
// next iteration.
if (m_beta < beta_thresh)
{
m_fac_f.setZero();
m_beta = Scalar(0);
break;
}
// f <- f - V * Vf
m_fac_f.noalias() -= Vs * Vf.head(i1);
// h <- h + Vf
h.noalias() += Vf.head(i1);
// beta <- ||f||
m_beta = m_op.norm(m_fac_f);
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
count++;
}
}
// Indicate that this is a step-m factorization
m_k = to_m;
}
// Apply H -> Q'HQ, where Q is from a double shift QR decomposition
void compress_H(const DoubleShiftQR<Scalar>& decomp)
{
decomp.matrix_QtHQ(m_fac_H);
m_k -= 2;
}
// Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition
void compress_H(const UpperHessenbergQR<Scalar>& decomp)
{
decomp.matrix_QtHQ(m_fac_H);
m_k--;
}
// Apply V -> VQ and compute the new f.
// Should be called after compress_H(), since m_k is updated there.
// Only need to update the first k+1 columns of V
// The first (m - k + i) elements of the i-th column of Q are non-zero,
// and the rest are zero
void compress_V(const Matrix& Q)
{
Matrix Vs(m_n, m_k + 1);
for (Index i = 0; i < m_k; i++)
{
const Index nnz = m_m - m_k + i + 1;
MapConstVec q(&Q(0, i), nnz);
Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q;
}
Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k);
m_fac_V.leftCols(m_k + 1).noalias() = Vs;
Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1);
m_fac_f.swap(fk);
m_beta = m_op.norm(m_fac_f);
}
};
} // namespace Spectra
#endif // ARNOLDI_H

530
gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h vendored Normal file
View File

@ -0,0 +1,530 @@
// Copyright (C) 2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef BK_LDLT_H
#define BK_LDLT_H
#include <Eigen/Core>
#include <vector>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
// Bunch-Kaufman LDLT decomposition
// References:
// 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems.
// Mathematics of computation, 31(137), 163-179.
// 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4.
// 3. Bunch-Parlett diagonal pivoting <http://oz.nthu.edu.tw/~d947207/Chap13_GE3.ppt>
// 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers.
// SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561.
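//
// A minimal usage sketch (the names `A` and `b` are illustrative; `A` is assumed
// to be a symmetric Eigen::MatrixXd and `b` a compatible Eigen::VectorXd):
//
//     BKLDLT<double> ldlt(A, Eigen::Lower, 0.0);  // factorize A - 0 * I
//     if (ldlt.info() == SUCCESSFUL)
//     {
//         Eigen::VectorXd x = ldlt.solve(b);      // solve A * x = b
//     }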
template <typename Scalar = double>
class BKLDLT
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Matrix<Index, Eigen::Dynamic, 1> IntVector;
typedef Eigen::Ref<Vector> GenericVector;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef const Eigen::Ref<const Vector> ConstGenericVector;
Index m_n;
Vector m_data; // storage for a lower-triangular matrix
std::vector<Scalar*> m_colptr; // pointers to columns
IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5
std::vector<std::pair<Index, Index> > m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)]
bool m_computed;
int m_info;
// Access to elements
// Pointer to the k-th column
Scalar* col_pointer(Index k) { return m_colptr[k]; }
// A[i, j] -> m_colptr[j][i - j], i >= j
Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; }
const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; }
// A[i, i] -> m_colptr[i][0]
Scalar& diag_coeff(Index i) { return m_colptr[i][0]; }
const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; }
// Compute column pointers
void compute_pointer()
{
m_colptr.clear();
m_colptr.reserve(m_n);
Scalar* head = m_data.data();
for (Index i = 0; i < m_n; i++)
{
m_colptr.push_back(head);
head += (m_n - i);
}
}
// Copy mat - shift * I to m_data
void copy_data(ConstGenericMatrix& mat, int uplo, const Scalar& shift)
{
if (uplo == Eigen::Lower)
{
for (Index j = 0; j < m_n; j++)
{
const Scalar* begin = &mat.coeffRef(j, j);
const Index len = m_n - j;
std::copy(begin, begin + len, col_pointer(j));
diag_coeff(j) -= shift;
}
}
else
{
Scalar* dest = m_data.data();
for (Index i = 0; i < m_n; i++)
{
for (Index j = i; j < m_n; j++, dest++)
{
*dest = mat.coeff(i, j);
}
diag_coeff(i) -= shift;
}
}
}
// Compute compressed permutations
void compress_permutation()
{
for (Index i = 0; i < m_n; i++)
{
// Recover the permutation action
const Index perm = (m_perm[i] >= 0) ? (m_perm[i]) : (-m_perm[i] - 1);
if (perm != i)
m_permc.push_back(std::make_pair(i, perm));
}
}
// Working on the A[k:end, k:end] submatrix
// Exchange k <-> r
// Assume r >= k
void pivoting_1x1(Index k, Index r)
{
// No permutation
if (k == r)
{
m_perm[k] = r;
return;
}
// A[k, k] <-> A[r, r]
std::swap(diag_coeff(k), diag_coeff(r));
// A[(r+1):end, k] <-> A[(r+1):end, r]
std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r));
// A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)]
Scalar* src = &coeff(k + 1, k);
for (Index j = k + 1; j < r; j++, src++)
{
std::swap(*src, coeff(r, j));
}
m_perm[k] = r;
}
// Working on the A[k:end, k:end] submatrix
// Exchange [k+1, k] <-> [r, p]
// Assume p >= k, r >= k+1
void pivoting_2x2(Index k, Index r, Index p)
{
pivoting_1x1(k, p);
pivoting_1x1(k + 1, r);
// A[k+1, k] <-> A[r, k]
std::swap(coeff(k + 1, k), coeff(r, k));
// Use negative signs to indicate a 2x2 block
// Also minus one to distinguish a negative zero from a positive zero
m_perm[k] = -m_perm[k] - 1;
m_perm[k + 1] = -m_perm[k + 1] - 1;
}
// A[r1, c1:c2] <-> A[r2, c1:c2]
// Assume r2 >= r1 > c2 >= c1
void interchange_rows(Index r1, Index r2, Index c1, Index c2)
{
if (r1 == r2)
return;
for (Index j = c1; j <= c2; j++)
{
std::swap(coeff(r1, j), coeff(r2, j));
}
}
// lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|}
// Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix
// r is the row index
// Assume k < end
Scalar find_lambda(Index k, Index& r)
{
using std::abs;
const Scalar* head = col_pointer(k); // => A[k, k]
const Scalar* end = col_pointer(k + 1);
// Start with r=k+1, lambda=A[k+1, k]
r = k + 1;
Scalar lambda = abs(head[1]);
// Scan remaining elements
for (const Scalar* ptr = head + 2; ptr < end; ptr++)
{
const Scalar abs_elem = abs(*ptr);
if (lambda < abs_elem)
{
lambda = abs_elem;
r = k + (ptr - head);
}
}
return lambda;
}
// sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]}
// Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix
// p is the row index
// Assume k < r < end
Scalar find_sigma(Index k, Index r, Index& p)
{
using std::abs;
// First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda()
// If r == end, we skip this search
Scalar sigma = Scalar(-1);
if (r < m_n - 1)
sigma = find_lambda(r, p);
// Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1]
for (Index j = k; j < r; j++)
{
const Scalar abs_elem = abs(coeff(r, j));
if (sigma < abs_elem)
{
sigma = abs_elem;
p = j;
}
}
return sigma;
}
// Generate permutations and apply to A
// Return true if the resulting pivoting is 1x1, and false if 2x2
bool permutate_mat(Index k, const Scalar& alpha)
{
using std::abs;
Index r = k, p = k;
const Scalar lambda = find_lambda(k, r);
// If lambda=0, no need to interchange
if (lambda > Scalar(0))
{
const Scalar abs_akk = abs(diag_coeff(k));
// If |A[k, k]| >= alpha * lambda, no need to interchange
if (abs_akk < alpha * lambda)
{
const Scalar sigma = find_sigma(k, r, p);
// If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange
if (sigma * abs_akk < alpha * lambda * lambda)
{
if (abs_akk >= alpha * sigma)
{
// Permutation on A
pivoting_1x1(k, r);
// Permutation on L
interchange_rows(k, r, 0, k - 1);
return true;
}
else
{
// There are two versions of permutation here
// 1. A[k+1, k] <-> A[r, k]
// 2. A[k+1, k] <-> A[r, p], where p >= k and r >= k+1
//
// Versions 1 and 2 are used by Ref[1] and Ref[2], respectively
// Version 1 implementation
p = k;
// Version 2 implementation
// [r, p] and [p, r] are symmetric, but we need to make sure
// p >= k and r >= k+1, so it is safe to always make r > p
// One exception is when min{r,p} == k+1, in which case we make
// r = k+1, so that only one permutation needs to be performed
/* const Index rp_min = std::min(r, p);
const Index rp_max = std::max(r, p);
if(rp_min == k + 1)
{
r = rp_min; p = rp_max;
} else {
r = rp_max; p = rp_min;
} */
// Right now we use Version 1 since it reduces the overhead of interchange
// Permutation on A
pivoting_2x2(k, r, p);
// Permutation on L
interchange_rows(k, p, 0, k - 1);
interchange_rows(k + 1, r, 0, k - 1);
return false;
}
}
}
}
return true;
}
// E = [e11, e12]
// [e21, e22]
// Overwrite E with inv(E)
void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const
{
// inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta
// [d21, d22]
const Scalar delta = e11 * e22 - e21 * e21;
std::swap(e11, e22);
e11 /= delta;
e22 /= delta;
e21 = -e21 / delta;
}
// Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE
int gaussian_elimination_1x1(Index k)
{
// D = 1 / A[k, k]
const Scalar akk = diag_coeff(k);
// Return NUMERICAL_ISSUE if not invertible
if (akk == Scalar(0))
return NUMERICAL_ISSUE;
diag_coeff(k) = Scalar(1) / akk;
// B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k]
Scalar* lptr = col_pointer(k) + 1;
const Index ldim = m_n - k - 1;
MapVec l(lptr, ldim);
for (Index j = 0; j < ldim; j++)
{
MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j);
}
// l /= A[k, k]
l /= akk;
return SUCCESSFUL;
}
// Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE
int gaussian_elimination_2x2(Index k)
{
// D = inv(E)
Scalar& e11 = diag_coeff(k);
Scalar& e21 = coeff(k + 1, k);
Scalar& e22 = diag_coeff(k + 1);
// Return NUMERICAL_ISSUE if not invertible
if (e11 * e22 - e21 * e21 == Scalar(0))
return NUMERICAL_ISSUE;
inverse_inplace_2x2(e11, e21, e22);
// X = l * inv(E), l := L[(k+2):end, k:(k+1)]
Scalar* l1ptr = &coeff(k + 2, k);
Scalar* l2ptr = &coeff(k + 2, k + 1);
const Index ldim = m_n - k - 2;
MapVec l1(l1ptr, ldim), l2(l2ptr, ldim);
Eigen::Matrix<Scalar, Eigen::Dynamic, 2> X(ldim, 2);
X.col(0).noalias() = l1 * e11 + l2 * e21;
X.col(1).noalias() = l1 * e21 + l2 * e22;
// B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end]
for (Index j = 0; j < ldim; j++)
{
MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]);
}
// l = X
l1.noalias() = X.col(0);
l2.noalias() = X.col(1);
return SUCCESSFUL;
}
public:
BKLDLT() :
m_n(0), m_computed(false), m_info(NOT_COMPUTED)
{}
// Factorize mat - shift * I
BKLDLT(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) :
m_n(mat.rows()), m_computed(false), m_info(NOT_COMPUTED)
{
compute(mat, uplo, shift);
}
void compute(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0))
{
using std::abs;
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("BKLDLT: matrix must be square");
m_perm.setLinSpaced(m_n, 0, m_n - 1);
m_permc.clear();
// Copy data
m_data.resize((m_n * (m_n + 1)) / 2);
compute_pointer();
copy_data(mat, uplo, shift);
const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0;
Index k = 0;
for (k = 0; k < m_n - 1; k++)
{
// 1. Interchange rows and columns of A, and save the result to m_perm
bool is_1x1 = permutate_mat(k, alpha);
// 2. Gaussian elimination
if (is_1x1)
{
m_info = gaussian_elimination_1x1(k);
}
else
{
m_info = gaussian_elimination_2x2(k);
k++;
}
// 3. Check status
if (m_info != SUCCESSFUL)
break;
}
// Invert the last 1x1 block if it exists
if (k == m_n - 1)
{
const Scalar akk = diag_coeff(k);
if (akk == Scalar(0))
m_info = NUMERICAL_ISSUE;
diag_coeff(k) = Scalar(1) / diag_coeff(k);
}
compress_permutation();
m_computed = true;
}
// Solve Ax=b
void solve_inplace(GenericVector b) const
{
if (!m_computed)
throw std::logic_error("BKLDLT: need to call compute() first");
// PAP' = LDL'
// 1. b -> Pb
Scalar* x = b.data();
MapVec res(x, m_n);
Index npermc = m_permc.size();
for (Index i = 0; i < npermc; i++)
{
std::swap(x[m_permc[i].first], x[m_permc[i].second]);
}
// 2. Lz = Pb
// If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2
const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2);
for (Index i = 0; i <= end; i++)
{
const Index b1size = m_n - i - 1;
const Index b2size = b1size - 1;
if (m_perm[i] >= 0)
{
MapConstVec l(&coeff(i + 1, i), b1size);
res.segment(i + 1, b1size).noalias() -= l * x[i];
}
else
{
MapConstVec l1(&coeff(i + 2, i), b2size);
MapConstVec l2(&coeff(i + 2, i + 1), b2size);
res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]);
i++;
}
}
// 3. Dw = z
for (Index i = 0; i < m_n; i++)
{
const Scalar e11 = diag_coeff(i);
if (m_perm[i] >= 0)
{
x[i] *= e11;
}
else
{
const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1);
const Scalar wi = x[i] * e11 + x[i + 1] * e21;
x[i + 1] = x[i] * e21 + x[i + 1] * e22;
x[i] = wi;
i++;
}
}
// 4. L'y = w
// If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2
Index i = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2);
for (; i >= 0; i--)
{
const Index ldim = m_n - i - 1;
MapConstVec l(&coeff(i + 1, i), ldim);
x[i] -= res.segment(i + 1, ldim).dot(l);
if (m_perm[i] < 0)
{
MapConstVec l2(&coeff(i + 1, i - 1), ldim);
x[i - 1] -= res.segment(i + 1, ldim).dot(l2);
i--;
}
}
// 5. x = P'y
for (Index i = npermc - 1; i >= 0; i--)
{
std::swap(x[m_permc[i].first], x[m_permc[i].second]);
}
}
Vector solve(ConstGenericVector& b) const
{
Vector res = b;
solve_inplace(res);
return res;
}
int info() const { return m_info; }
};
} // namespace Spectra
#endif // BK_LDLT_H

384
gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h vendored Normal file
View File

@ -0,0 +1,384 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DOUBLE_SHIFT_QR_H
#define DOUBLE_SHIFT_QR_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <algorithm> // std::min, std::fill, std::copy
#include <cmath> // std::abs, std::sqrt, std::pow
#include <stdexcept> // std::invalid_argument, std::logic_error
#include "../Util/TypeTraits.h"
namespace Spectra {
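// Implicit double-shift (Francis) QR transformation H -> Q'HQ for an upper
// Hessenberg matrix H. The two shifts mu1 and mu2 enter only through
// s = mu1 + mu2 and t = mu1 * mu2, i.e. they are the roots of x^2 - s*x + t.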
template <typename Scalar = double>
class DoubleShiftQR
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, 3, Eigen::Dynamic> Matrix3X;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Array<unsigned char, Eigen::Dynamic, 1> IntArray;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
Index m_n; // Dimension of the matrix
Matrix m_mat_H; // A copy of the matrix to be factorized
Scalar m_shift_s; // Shift constant
Scalar m_shift_t; // Shift constant
Matrix3X m_ref_u; // Householder reflectors
IntArray m_ref_nr; // How many rows each reflector affects
// 3 - A general reflector
// 2 - A Givens rotation
// 1 - An identity transformation
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision,
// e.g. ~= 1e-16 for the "double" type
const Scalar m_eps_rel;
const Scalar m_eps_abs;
bool m_computed; // Whether matrix has been factorized
void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind)
{
using std::abs;
Scalar* u = &m_ref_u.coeffRef(0, ind);
unsigned char* nr = m_ref_nr.data();
// In general case the reflector affects 3 rows
nr[ind] = 3;
Scalar x2x3 = Scalar(0);
// If x3 is zero, decrease nr by 1
if (abs(x3) < m_near_0)
{
// If x2 is also zero, nr will be 1, and we can exit this function
if (abs(x2) < m_near_0)
{
nr[ind] = 1;
return;
}
else
{
nr[ind] = 2;
}
x2x3 = abs(x2);
}
else
{
x2x3 = Eigen::numext::hypot(x2, x3);
}
// x1' = x1 - rho * ||x||
// rho = -sign(x1), if x1 == 0, we choose rho = 1
Scalar x1_new = x1 - ((x1 <= 0) - (x1 > 0)) * Eigen::numext::hypot(x1, x2x3);
Scalar x_norm = Eigen::numext::hypot(x1_new, x2x3);
// Double check the norm of new x
if (x_norm < m_near_0)
{
nr[ind] = 1;
return;
}
u[0] = x1_new / x_norm;
u[1] = x2 / x_norm;
u[2] = x3 / x_norm;
}
void compute_reflector(const Scalar* x, Index ind)
{
compute_reflector(x[0], x[1], x[2], ind);
}
// Update the block X = H(il:iu, il:iu)
void update_block(Index il, Index iu)
{
// Block size
const Index bsize = iu - il + 1;
// If block size == 1, there is no need to apply reflectors
if (bsize == 1)
{
m_ref_nr.coeffRef(il) = 1;
return;
}
const Scalar x00 = m_mat_H.coeff(il, il),
x01 = m_mat_H.coeff(il, il + 1),
x10 = m_mat_H.coeff(il + 1, il),
x11 = m_mat_H.coeff(il + 1, il + 1);
// m00 = x00 * (x00 - s) + x01 * x10 + t
const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t;
// m10 = x10 * (x00 + x11 - s)
const Scalar m10 = x10 * (x00 + x11 - m_shift_s);
// For block size == 2, do a Givens rotation on M = X * X - s * X + t * I
if (bsize == 2)
{
// This causes nr=2
compute_reflector(m00, m10, 0, il);
// Apply the reflector to X
apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il);
apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il);
m_ref_nr.coeffRef(il + 1) = 1;
return;
}
// For block size >=3, use the regular strategy
// m20 = x21 * x10
const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il);
compute_reflector(m00, m10, m20, il);
// Apply the first reflector
apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il);
apply_XP(m_mat_H.block(0, il, il + std::min(bsize, Index(4)), 3), m_n, il);
// Calculate the following reflectors
// If entering this loop, block size is at least 4.
for (Index i = 1; i < bsize - 2; i++)
{
compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i);
// Apply the reflector to X
apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i);
apply_XP(m_mat_H.block(0, il + i, il + std::min(bsize, Index(i + 4)), 3), m_n, il + i);
}
// The last reflector
// This causes nr=2
compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1);
// Apply the reflector to X
apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1);
apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1);
m_ref_nr.coeffRef(iu) = 1;
}
// P = I - 2 * u * u' = P'
// PX = X - 2 * u * (u'X)
void apply_PX(GenericMatrix X, Index stride, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind);
const Scalar u0_2 = Scalar(2) * u0,
u1_2 = Scalar(2) * u1;
const Index nrow = X.rows();
const Index ncol = X.cols();
Scalar* xptr = X.data();
if (nr == 2 || nrow == 2)
{
for (Index i = 0; i < ncol; i++, xptr += stride)
{
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1];
xptr[0] -= tmp * u0;
xptr[1] -= tmp * u1;
}
}
else
{
const Scalar u2 = m_ref_u.coeff(2, u_ind);
const Scalar u2_2 = Scalar(2) * u2;
for (Index i = 0; i < ncol; i++, xptr += stride)
{
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2];
xptr[0] -= tmp * u0;
xptr[1] -= tmp * u1;
xptr[2] -= tmp * u2;
}
}
}
// x is a pointer to a vector
// Px = x - 2 * dot(x, u) * u
void apply_PX(Scalar* x, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind),
u2 = m_ref_u.coeff(2, u_ind);
// When the reflector only contains two elements, u2 has been set to zero
const bool nr_is_2 = (nr == 2);
const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 0 : (x[2] * u2)));
x[0] -= dot2 * u0;
x[1] -= dot2 * u1;
if (!nr_is_2)
x[2] -= dot2 * u2;
}
// XP = X - 2 * (X * u) * u'
void apply_XP(GenericMatrix X, Index stride, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind);
const Scalar u0_2 = Scalar(2) * u0,
u1_2 = Scalar(2) * u1;
const int nrow = X.rows();
const int ncol = X.cols();
Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1)
if (nr == 2 || ncol == 2)
{
// tmp = 2 * u0 * X0 + 2 * u1 * X1
// X0 => X0 - u0 * tmp
// X1 => X1 - u1 * tmp
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
}
}
else
{
Scalar* X2 = X1 + stride; // X2 => X.col(2)
const Scalar u2 = m_ref_u.coeff(2, u_ind);
const Scalar u2_2 = Scalar(2) * u2;
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
X2[i] -= tmp * u2;
}
}
}
public:
DoubleShiftQR(Index size) :
m_n(size),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps_rel(m_eps),
m_eps_abs(m_near_0 * (m_n / m_eps)),
m_computed(false)
{}
DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) :
m_n(mat.rows()),
m_mat_H(m_n, m_n),
m_shift_s(s),
m_shift_t(t),
m_ref_u(3, m_n),
m_ref_nr(m_n),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps_rel(m_eps),
m_eps_abs(m_near_0 * (m_n / m_eps)),
m_computed(false)
{
compute(mat, s, t);
}
void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t)
{
using std::abs;
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("DoubleShiftQR: matrix must be square");
m_mat_H.resize(m_n, m_n);
m_shift_s = s;
m_shift_t = t;
m_ref_u.resize(3, m_n);
m_ref_nr.resize(m_n);
// Make a copy of mat
std::copy(mat.data(), mat.data() + mat.size(), m_mat_H.data());
// Obtain the indices of zero elements in the subdiagonal,
// so that H can be divided into several blocks
std::vector<int> zero_ind;
zero_ind.reserve(m_n - 1);
zero_ind.push_back(0);
Scalar* Hii = m_mat_H.data();
for (Index i = 0; i < m_n - 2; i++, Hii += (m_n + 1))
{
// Hii[1] => m_mat_H(i + 1, i)
const Scalar h = abs(Hii[1]);
if (h <= 0 || h <= m_eps_rel * (abs(Hii[0]) + abs(Hii[m_n + 1])))
{
Hii[1] = 0;
zero_ind.push_back(i + 1);
}
// Make sure m_mat_H is upper Hessenberg
// Zero the elements below m_mat_H(i + 1, i)
std::fill(Hii + 2, Hii + m_n - i, Scalar(0));
}
zero_ind.push_back(m_n);
for (std::vector<int>::size_type i = 0; i < zero_ind.size() - 1; i++)
{
const Index start = zero_ind[i];
const Index end = zero_ind[i + 1] - 1;
// Compute reflectors and update each block
update_block(start, end);
}
m_computed = true;
}
void matrix_QtHQ(Matrix& dest) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
dest.noalias() = m_mat_H;
}
// Q = P0 * P1 * ...
// Q'y = P_{n-2} * ... * P1 * P0 * y
void apply_QtY(Vector& y) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
Scalar* y_ptr = y.data();
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++, y_ptr++)
{
apply_PX(y_ptr, i);
}
}
// Q = P0 * P1 * ...
// YQ = Y * P0 * P1 * ...
void apply_YQ(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
const Index nrow = Y.rows();
const Index n2 = m_n - 2;
for (Index i = 0; i < n2; i++)
{
apply_XP(Y.block(0, i, nrow, 3), nrow, i);
}
apply_XP(Y.block(0, n2, nrow, 2), nrow, n2);
}
};
} // namespace Spectra
#endif // DOUBLE_SHIFT_QR_H

167
gtsam/3rdparty/Spectra/LinAlg/Lanczos.h vendored Normal file
View File

@ -0,0 +1,167 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef LANCZOS_H
#define LANCZOS_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <stdexcept> // std::invalid_argument
#include <sstream> // std::stringstream
#include "Arnoldi.h"
namespace Spectra {
// Lanczos factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated of dimension m, so the maximum value of k is m
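// Compared with the general Arnoldi process, H is symmetric tridiagonal here,
// so each step only updates H(i, i), H(i, i - 1) and H(i - 1, i).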
template <typename Scalar, typename ArnoldiOpType>
class Lanczos : public Arnoldi<Scalar, ArnoldiOpType>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Matrix> MapConstMat;
typedef Eigen::Map<const Vector> MapConstVec;
using Arnoldi<Scalar, ArnoldiOpType>::m_op;
using Arnoldi<Scalar, ArnoldiOpType>::m_n;
using Arnoldi<Scalar, ArnoldiOpType>::m_m;
using Arnoldi<Scalar, ArnoldiOpType>::m_k;
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_V;
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_H;
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_f;
using Arnoldi<Scalar, ArnoldiOpType>::m_beta;
using Arnoldi<Scalar, ArnoldiOpType>::m_near_0;
using Arnoldi<Scalar, ArnoldiOpType>::m_eps;
public:
Lanczos(const ArnoldiOpType& op, Index m) :
Arnoldi<Scalar, ArnoldiOpType>(op, m)
{}
// Lanczos factorization starting from step-k
void factorize_from(Index from_k, Index to_m, Index& op_counter)
{
using std::sqrt;
if (to_m <= from_k)
return;
if (from_k > m_k)
{
std::stringstream msg;
msg << "Lanczos: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")";
throw std::invalid_argument(msg.str());
}
const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));
// Pre-allocate vectors
Vector Vf(to_m);
Vector w(m_n);
// Keep the upper-left k x k submatrix of H and set other elements to 0
m_fac_H.rightCols(m_m - from_k).setZero();
m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();
for (Index i = from_k; i <= to_m - 1; i++)
{
bool restart = false;
// If beta = 0, then the next V is not full rank
// We need to generate a new residual vector that is orthogonal
// to the current V, which we call a restart
if (m_beta < m_near_0)
{
MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns
this->expand_basis(V, 2 * i, m_fac_f, m_beta);
restart = true;
}
// v <- f / ||f||
MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column
v.noalias() = m_fac_f / m_beta;
// Note that H[i+1, i] equals the unrestarted beta
m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
// w <- A * v
m_op.perform_op(v.data(), w.data());
op_counter++;
// H[i+1, i+1] = <v, w> = v'Bw
m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry
m_fac_H(i, i) = m_op.inner_product(v, w);
// f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1}
// If restarting, we know that H[i+1, i] = 0
if (restart)
m_fac_f.noalias() = w - m_fac_H(i, i) * v;
else
m_fac_f.noalias() = w - m_fac_H(i, i - 1) * m_fac_V.col(i - 1) - m_fac_H(i, i) * v;
m_beta = m_op.norm(m_fac_f);
// f/||f|| is going to be the next column of V, so we need to test
// whether V'B(f/||f||) ~= 0
const Index i1 = i + 1;
MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
// If not, iteratively correct the residual
int count = 0;
while (count < 5 && ortho_err > m_eps * m_beta)
{
// There is an edge case: when beta=||f|| is close to zero, f consists mostly
// of rounding-error noise, so the test [ortho_err < eps * beta] is very
// likely to fail. In particular, if beta=0, then the test is guaranteed to fail.
// Hence when this happens, we force f to be zero, and then restart in the
// next iteration.
if (m_beta < beta_thresh)
{
m_fac_f.setZero();
m_beta = Scalar(0);
break;
}
// f <- f - V * Vf
m_fac_f.noalias() -= Vs * Vf.head(i1);
// h <- h + Vf
m_fac_H(i - 1, i) += Vf[i - 1];
m_fac_H(i, i - 1) = m_fac_H(i - 1, i);
m_fac_H(i, i) += Vf[i];
// beta <- ||f||
m_beta = m_op.norm(m_fac_f);
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
count++;
}
}
// Indicate that this is a step-m factorization
m_k = to_m;
}
// Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition
void compress_H(const TridiagQR<Scalar>& decomp)
{
decomp.matrix_QtHQ(m_fac_H);
m_k--;
}
};
} // namespace Spectra
#endif // LANCZOS_H

219
gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h vendored Normal file
View File

@ -0,0 +1,219 @@
// The code was adapted from Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef TRIDIAG_EIGEN_H
#define TRIDIAG_EIGEN_H
#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <stdexcept>
#include "../Util/TypeTraits.h"
namespace Spectra {
template <typename Scalar = double>
class TridiagEigen
{
private:
typedef Eigen::Index Index;
// For convenience in adapting the tridiagonal_qr_step() function
typedef Scalar RealScalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
Index m_n;
Vector m_main_diag; // Main diagonal elements of the matrix
Vector m_sub_diag; // Sub-diagonal elements of the matrix
Matrix m_evecs; // To store eigenvectors
bool m_computed;
const Scalar m_near_0; // a very small value, ~= 1e-307 for the "double" type
// Adapted from Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
static void tridiagonal_qr_step(RealScalar* diag,
RealScalar* subdiag, Index start,
Index end, Scalar* matrixQ,
Index n)
{
using std::abs;
RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5);
RealScalar e = subdiag[end - 1];
// Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
// underflow thus leading to inf/NaN values when using the following commented code:
// RealScalar e2 = numext::abs2(subdiag[end-1]);
// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
// This explains the following, somewhat more complicated, version:
RealScalar mu = diag[end];
if (td == Scalar(0))
mu -= abs(e);
else
{
RealScalar e2 = Eigen::numext::abs2(subdiag[end - 1]);
RealScalar h = Eigen::numext::hypot(td, e);
if (e2 == RealScalar(0))
mu -= (e / (td + (td > RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
else
mu -= e2 / (td + (td > RealScalar(0) ? h : -h));
}
RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start];
Eigen::Map<Matrix> q(matrixQ, n, n);
for (Index k = start; k < end; ++k)
{
Eigen::JacobiRotation<RealScalar> rot;
rot.makeGivens(x, z);
const RealScalar s = rot.s();
const RealScalar c = rot.c();
// do T = G' T G
RealScalar sdk = s * diag[k] + c * subdiag[k];
RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1];
diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]);
diag[k + 1] = s * sdk + c * dkp1;
subdiag[k] = c * sdk - s * dkp1;
if (k > start)
subdiag[k - 1] = c * subdiag[k - 1] - s * z;
x = subdiag[k];
if (k < end - 1)
{
z = -s * subdiag[k + 1];
subdiag[k + 1] = c * subdiag[k + 1];
}
// apply the givens rotation to the unit matrix Q = Q * G
if (matrixQ)
q.applyOnTheRight(k, k + 1, rot);
}
}
public:
TridiagEigen() :
m_n(0), m_computed(false),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10))
{}
TridiagEigen(ConstGenericMatrix& mat) :
m_n(mat.rows()), m_computed(false),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10))
{
compute(mat);
}
void compute(ConstGenericMatrix& mat)
{
using std::abs;
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("TridiagEigen: matrix must be square");
m_main_diag.resize(m_n);
m_sub_diag.resize(m_n - 1);
m_evecs.resize(m_n, m_n);
m_evecs.setIdentity();
// Scale matrix to improve stability
const Scalar scale = std::max(mat.diagonal().cwiseAbs().maxCoeff(),
mat.diagonal(-1).cwiseAbs().maxCoeff());
// If scale = 0, mat is a zero matrix, so we can stop early
if (scale < m_near_0)
{
// m_main_diag contains eigenvalues
m_main_diag.setZero();
// m_evecs has been set identity
// m_evecs.setIdentity();
m_computed = true;
return;
}
m_main_diag.noalias() = mat.diagonal() / scale;
m_sub_diag.noalias() = mat.diagonal(-1) / scale;
Scalar* diag = m_main_diag.data();
Scalar* subdiag = m_sub_diag.data();
Index end = m_n - 1;
Index start = 0;
Index iter = 0; // total number of iterations
int info = 0; // 0 for success, 1 for failure
const Scalar considerAsZero = TypeTraits<Scalar>::min();
const Scalar precision = Scalar(2) * Eigen::NumTraits<Scalar>::epsilon();
while (end > 0)
{
for (Index i = start; i < end; i++)
if (abs(subdiag[i]) <= considerAsZero ||
abs(subdiag[i]) <= (abs(diag[i]) + abs(diag[i + 1])) * precision)
subdiag[i] = 0;
// find the largest unreduced block
while (end > 0 && subdiag[end - 1] == Scalar(0))
end--;
if (end <= 0)
break;
// if we spent too many iterations, we give up
iter++;
if (iter > 30 * m_n)
{
info = 1;
break;
}
start = end - 1;
while (start > 0 && subdiag[start - 1] != Scalar(0))
start--;
tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n);
}
if (info > 0)
throw std::runtime_error("TridiagEigen: eigen decomposition failed");
// Scale eigenvalues back
m_main_diag *= scale;
m_computed = true;
}
const Vector& eigenvalues() const
{
if (!m_computed)
throw std::logic_error("TridiagEigen: need to call compute() first");
// After calling compute(), main_diag will contain the eigenvalues.
return m_main_diag;
}
const Matrix& eigenvectors() const
{
if (!m_computed)
throw std::logic_error("TridiagEigen: need to call compute() first");
return m_evecs;
}
};
} // namespace Spectra
#endif // TRIDIAG_EIGEN_H
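As a quick illustration (not part of the diff), a minimal sketch of how TridiagEigen might be driven. Only the API declared in the header above plus standard Eigen calls are used; the test matrix and its values are made up.

// Minimal sketch, assuming Eigen and the header above are available.
#include <Eigen/Core>
#include <iostream>

int main()
{
    const int n = 5;
    Eigen::MatrixXd T = Eigen::MatrixXd::Zero(n, n);
    T.diagonal().setConstant(2.0);     // main diagonal
    T.diagonal(-1).setConstant(-1.0);  // sub-diagonal (the class only reads these two)

    Spectra::TridiagEigen<double> eig(T);  // decomposition happens in the constructor
    std::cout << "eigenvalues:\n" << eig.eigenvalues() << "\n";
    std::cout << "first eigenvector:\n" << eig.eigenvectors().col(0) << "\n";
    return 0;
}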

View File

@ -0,0 +1,319 @@
// The code was adapted from Eigen/src/Eigenvalues/EigenSolver.h
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef UPPER_HESSENBERG_EIGEN_H
#define UPPER_HESSENBERG_EIGEN_H
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include <stdexcept>
namespace Spectra {
template <typename Scalar = double>
class UpperHessenbergEigen
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
Index m_n; // Size of the matrix
Eigen::RealSchur<Matrix> m_realSchur; // Schur decomposition solver
Matrix m_matT; // Schur T matrix
Matrix m_eivec; // Storing eigenvectors
ComplexVector m_eivalues; // Eigenvalues
bool m_computed;
void doComputeEigenvectors()
{
using std::abs;
const Index size = m_eivec.cols();
const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
// Inefficient! This is already computed in RealSchur
Scalar norm(0);
for (Index j = 0; j < size; ++j)
{
norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum();
}
// Backsubstitute to find vectors of upper triangular form
if (norm == Scalar(0))
return;
for (Index n = size - 1; n >= 0; n--)
{
Scalar p = m_eivalues.coeff(n).real();
Scalar q = m_eivalues.coeff(n).imag();
// Scalar vector
if (q == Scalar(0))
{
Scalar lastr(0), lastw(0);
Index l = n;
m_matT.coeffRef(n, n) = Scalar(1);
for (Index i = n - 1; i >= 0; i--)
{
Scalar w = m_matT.coeff(i, i) - p;
Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1));
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastr = r;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == Scalar(0))
{
if (w != Scalar(0))
m_matT.coeffRef(i, n) = -r / w;
else
m_matT.coeffRef(i, n) = -r / (eps * norm);
}
else // Solve real equations
{
Scalar x = m_matT.coeff(i, i + 1);
Scalar y = m_matT.coeff(i + 1, i);
Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
Scalar t = (x * lastr - lastw * r) / denom;
m_matT.coeffRef(i, n) = t;
if (abs(x) > abs(lastw))
m_matT.coeffRef(i + 1, n) = (-r - w * t) / x;
else
m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw;
}
// Overflow control
Scalar t = abs(m_matT.coeff(i, n));
if ((eps * t) * t > Scalar(1))
m_matT.col(n).tail(size - i) /= t;
}
}
}
else if (q < Scalar(0) && n > 0)
{ // Complex vector
Scalar lastra(0), lastsa(0), lastw(0);
Index l = n - 1;
// Last vector component imaginary so matrix is triangular
if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n)))
{
m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1);
m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / m_matT.coeff(n, n - 1);
}
else
{
Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q);
m_matT.coeffRef(n - 1, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc);
}
m_matT.coeffRef(n, n - 1) = Scalar(0);
m_matT.coeffRef(n, n) = Scalar(1);
for (Index i = n - 2; i >= 0; i--)
{
Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1));
Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1));
Scalar w = m_matT.coeff(i, i) - p;
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastra = ra;
lastsa = sa;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == Scalar(0))
{
Complex cc = Complex(-ra, -sa) / Complex(w, q);
m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(i, n) = Eigen::numext::imag(cc);
}
else
{
// Solve complex equations
Scalar x = m_matT.coeff(i, i + 1);
Scalar y = m_matT.coeff(i + 1, i);
Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
if ((vr == Scalar(0)) && (vi == Scalar(0)))
vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi);
m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(i, n) = Eigen::numext::imag(cc);
if (abs(x) > (abs(lastw) + abs(q)))
{
m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x;
m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x;
}
else
{
cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q);
m_matT.coeffRef(i + 1, n - 1) = Eigen::numext::real(cc);
m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc);
}
}
// Overflow control
Scalar t = std::max(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n)));
if ((eps * t) * t > Scalar(1))
m_matT.block(i, n - 1, size - i, 2) /= t;
}
}
// We handled a pair of complex conjugate eigenvalues, so need to skip them both
n--;
}
}
// Back transformation to get eigenvectors of original matrix
Vector m_tmp(size);
for (Index j = size - 1; j >= 0; j--)
{
m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1);
m_eivec.col(j) = m_tmp;
}
}
public:
UpperHessenbergEigen() :
m_n(0), m_computed(false)
{}
UpperHessenbergEigen(ConstGenericMatrix& mat) :
m_n(mat.rows()), m_computed(false)
{
compute(mat);
}
void compute(ConstGenericMatrix& mat)
{
using std::abs;
using std::sqrt;
if (mat.rows() != mat.cols())
throw std::invalid_argument("UpperHessenbergEigen: matrix must be square");
m_n = mat.rows();
// Scale matrix prior to the Schur decomposition
const Scalar scale = mat.cwiseAbs().maxCoeff();
// Reduce to real Schur form
Matrix Q = Matrix::Identity(m_n, m_n);
m_realSchur.computeFromHessenberg(mat / scale, Q, true);
if (m_realSchur.info() != Eigen::Success)
throw std::runtime_error("UpperHessenbergEigen: eigen decomposition failed");
m_matT = m_realSchur.matrixT();
m_eivec = m_realSchur.matrixU();
// Compute eigenvalues from matT
m_eivalues.resize(m_n);
Index i = 0;
while (i < m_n)
{
// Real eigenvalue
if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0))
{
m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
++i;
}
else // Complex eigenvalues
{
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1));
Scalar z;
// Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
// without overflow
{
Scalar t0 = m_matT.coeff(i + 1, i);
Scalar t1 = m_matT.coeff(i, i + 1);
Scalar maxval = std::max(abs(p), std::max(abs(t0), abs(t1)));
t0 /= maxval;
t1 /= maxval;
Scalar p0 = p / maxval;
z = maxval * sqrt(abs(p0 * p0 + t0 * t1));
}
m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z);
m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z);
i += 2;
}
}
// Compute eigenvectors
doComputeEigenvectors();
// Scale eigenvalues back
m_eivalues *= scale;
m_computed = true;
}
const ComplexVector& eigenvalues() const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergEigen: need to call compute() first");
return m_eivalues;
}
ComplexMatrix eigenvectors()
{
using std::abs;
if (!m_computed)
throw std::logic_error("UpperHessenbergEigen: need to call compute() first");
Index n = m_eivec.cols();
ComplexMatrix matV(n, n);
for (Index j = 0; j < n; ++j)
{
// imaginary part of real eigenvalue is already set to exact zero
if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n)
{
// we have a real eigenvalue
matV.col(j) = m_eivec.col(j).template cast<Complex>();
matV.col(j).normalize();
}
else
{
// we have a pair of complex eigenvalues
for (Index i = 0; i < n; ++i)
{
matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1));
matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1));
}
matV.col(j).normalize();
matV.col(j + 1).normalize();
++j;
}
}
return matV;
}
};
} // namespace Spectra
#endif // UPPER_HESSENBERG_EIGEN_H
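A similarly hedged sketch for this solver (the upper Hessenberg test matrix is fabricated; only the API shown in the header above is used):

// Function-body fragment; assumes <Eigen/Core> and the header above.
Eigen::MatrixXd H = Eigen::MatrixXd::Random(6, 6);
// Zero everything below the first sub-diagonal so that H is upper Hessenberg
for (int i = 2; i < H.rows(); i++)
    for (int j = 0; j < i - 1; j++)
        H(i, j) = 0.0;

Spectra::UpperHessenbergEigen<double> eig(H);
Eigen::VectorXcd evals = eig.eigenvalues();   // complex eigenvalues
Eigen::MatrixXcd evecs = eig.eigenvectors();  // normalized eigenvectors, column-paired for complex pairs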

View File

@ -0,0 +1,670 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef UPPER_HESSENBERG_QR_H
#define UPPER_HESSENBERG_QR_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
#include <algorithm> // std::fill, std::copy
#include <stdexcept> // std::logic_error
namespace Spectra {
///
/// \defgroup Internals Internal Classes
///
/// Classes for internal use. May be useful to developers.
///
///
/// \ingroup Internals
/// @{
///
///
/// \defgroup LinearAlgebra Linear Algebra
///
/// A number of classes for linear algebra operations.
///
///
/// \ingroup LinearAlgebra
///
/// Perform the QR decomposition of an upper Hessenberg matrix.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
///
template <typename Scalar = double>
class UpperHessenbergQR
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Matrix<Scalar, 1, Eigen::Dynamic> RowVector;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
typedef Eigen::Ref<Matrix> GenericMatrix;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
Matrix m_mat_T;
protected:
Index m_n;
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
// Q = G1 * G2 * ... * G_{n-1}
Scalar m_shift;
Array m_rot_cos;
Array m_rot_sin;
bool m_computed;
// Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r
// If both x and y are zero, set c = 1 and s = 0
// We must implement it in a numerically stable way
static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s)
{
using std::sqrt;
const Scalar xsign = (x > Scalar(0)) - (x < Scalar(0));
const Scalar ysign = (y > Scalar(0)) - (y < Scalar(0));
const Scalar xabs = x * xsign;
const Scalar yabs = y * ysign;
if (xabs > yabs)
{
// In this case xabs != 0
const Scalar ratio = yabs / xabs; // so that 0 <= ratio < 1
const Scalar common = sqrt(Scalar(1) + ratio * ratio);
c = xsign / common;
r = xabs * common;
s = -y / r;
}
else
{
if (yabs == Scalar(0))
{
r = Scalar(0);
c = Scalar(1);
s = Scalar(0);
return;
}
const Scalar ratio = xabs / yabs; // so that 0 <= ratio <= 1
const Scalar common = sqrt(Scalar(1) + ratio * ratio);
s = -ysign / common;
r = yabs * common;
c = x / r;
}
}
public:
///
/// Constructor to preallocate memory. Computation can
/// be performed later by calling the compute() method.
///
UpperHessenbergQR(Index size) :
m_n(size),
m_rot_cos(m_n - 1),
m_rot_sin(m_n - 1),
m_computed(false)
{}
///
/// Constructor to create an object that performs and stores the
/// QR decomposition of an upper Hessenberg matrix `mat`, with an
/// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix
/// `mat`, and \f$s\f$ is the shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the upper triangular and the lower subdiagonal parts of
/// the matrix are used.
///
UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) :
m_n(mat.rows()),
m_shift(shift),
m_rot_cos(m_n - 1),
m_rot_sin(m_n - 1),
m_computed(false)
{
compute(mat, shift);
}
///
/// Virtual destructor.
///
virtual ~UpperHessenbergQR() {}
///
/// Conduct the QR factorization of an upper Hessenberg matrix with
/// an optional shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the upper triangular and the lower subdiagonal parts of
/// the matrix are used.
///
virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0))
{
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("UpperHessenbergQR: matrix must be square");
m_shift = shift;
m_mat_T.resize(m_n, m_n);
m_rot_cos.resize(m_n - 1);
m_rot_sin.resize(m_n - 1);
// Make a copy of mat - s * I
std::copy(mat.data(), mat.data() + mat.size(), m_mat_T.data());
m_mat_T.diagonal().array() -= m_shift;
Scalar xi, xj, r, c, s;
Scalar *Tii, *ptr;
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
Tii = &m_mat_T.coeffRef(i, i);
// Make sure mat_T is upper Hessenberg
// Zero the elements below mat_T(i + 1, i)
std::fill(Tii + 2, Tii + m_n - i, Scalar(0));
xi = Tii[0]; // mat_T(i, i)
xj = Tii[1]; // mat_T(i + 1, i)
compute_rotation(xi, xj, r, c, s);
m_rot_cos[i] = c;
m_rot_sin[i] = s;
// For a complete QR decomposition,
// we first obtain the rotation matrix
// G = [ cos sin]
// [-sin cos]
// and then do T[i:(i + 1), i:(n - 1)] = G' * T[i:(i + 1), i:(n - 1)]
// Gt << c, -s, s, c;
// m_mat_T.block(i, i, 2, m_n - i) = Gt * m_mat_T.block(i, i, 2, m_n - i);
Tii[0] = r; // m_mat_T(i, i) => r
Tii[1] = 0; // m_mat_T(i + 1, i) => 0
ptr = Tii + m_n; // m_mat_T(i, k), k = i+1, i+2, ..., n-1
for (Index j = i + 1; j < m_n; j++, ptr += m_n)
{
Scalar tmp = ptr[0];
ptr[0] = c * tmp - s * ptr[1];
ptr[1] = s * tmp + c * ptr[1];
}
// If we do not need to calculate the R matrix, then
// only the cos and sin sequences are required.
// In such case we only update T[i + 1, (i + 1):(n - 1)]
// m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) *= c;
// m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) += s * mat_T.block(i, i + 1, 1, m_n - i - 1);
}
m_computed = true;
}
///
/// Return the \f$R\f$ matrix in the QR decomposition, which is an
/// upper triangular matrix.
///
/// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
virtual Matrix matrix_R() const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
return m_mat_T;
}
///
/// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`,
/// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix.
///
/// \param dest The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`,
/// depending on the template parameter `Scalar` defined.
///
virtual void matrix_QtHQ(Matrix& dest) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
// Make a copy of the R matrix
dest.resize(m_n, m_n);
std::copy(m_mat_T.data(), m_mat_T.data() + m_mat_T.size(), dest.data());
// Compute the RQ matrix
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Scalar *Yi, *Yi1;
Yi = &dest.coeffRef(0, i);
Yi1 = Yi + m_n; // RQ(0, i + 1)
const Index i2 = i + 2;
for (Index j = 0; j < i2; j++)
{
const Scalar tmp = Yi[j];
Yi[j] = c * tmp - s * Yi1[j];
Yi1[j] = s * tmp + c * Yi1[j];
}
/* Vector dest = RQ.block(0, i, i + 2, 1);
dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1);
dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */
}
// Add the shift to the diagonal
dest.diagonal().array() += m_shift;
}
///
/// Apply the \f$Q\f$ matrix to a vector \f$y\f$.
///
/// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$.
///
/// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
// Y -> QY = G1 * G2 * ... * Y
void apply_QY(Vector& Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
for (Index i = m_n - 2; i >= 0; i--)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1)] = Gi * Y[i:(i + 1)]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
const Scalar tmp = Y[i];
Y[i] = c * tmp + s * Y[i + 1];
Y[i + 1] = -s * tmp + c * Y[i + 1];
}
}
///
/// Apply the \f$Q\f$ matrix to a vector \f$y\f$.
///
/// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$.
///
/// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
// Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y
void apply_QtY(Vector& Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1)] = Gi' * Y[i:(i + 1)]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
const Scalar tmp = Y[i];
Y[i] = c * tmp - s * Y[i + 1];
Y[i + 1] = s * tmp + c * Y[i + 1];
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> QY = G1 * G2 * ... * Y
void apply_QY(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
RowVector Yi(Y.cols()), Yi1(Y.cols());
for (Index i = m_n - 2; i >= 0; i--)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.row(i);
Yi1.noalias() = Y.row(i + 1);
Y.row(i) = c * Yi + s * Yi1;
Y.row(i + 1) = -s * Yi + c * Yi1;
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y
void apply_QtY(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
RowVector Yi(Y.cols()), Yi1(Y.cols());
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ]
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.row(i);
Yi1.noalias() = Y.row(i + 1);
Y.row(i) = c * Yi - s * Yi1;
Y.row(i + 1) = s * Yi + c * Yi1;
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> YQ = Y * G1 * G2 * ...
void apply_YQ(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
/*Vector Yi(Y.rows());
for(Index i = 0; i < m_n - 1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.col(i);
Y.col(i) = c * Yi - s * Y.col(i + 1);
Y.col(i + 1) = s * Yi + c * Y.col(i + 1);
}*/
Scalar *Y_col_i, *Y_col_i1;
const Index n1 = m_n - 1;
const Index nrow = Y.rows();
for (Index i = 0; i < n1; i++)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
Y_col_i = &Y.coeffRef(0, i);
Y_col_i1 = &Y.coeffRef(0, i + 1);
for (Index j = 0; j < nrow; j++)
{
Scalar tmp = Y_col_i[j];
Y_col_i[j] = c * tmp - s * Y_col_i1[j];
Y_col_i1[j] = s * tmp + c * Y_col_i1[j];
}
}
}
///
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
///
/// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$.
///
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
// Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1'
void apply_YQt(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
Vector Yi(Y.rows());
for (Index i = m_n - 2; i >= 0; i--)
{
const Scalar c = m_rot_cos.coeff(i);
const Scalar s = m_rot_sin.coeff(i);
// Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi'
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
Yi.noalias() = Y.col(i);
Y.col(i) = c * Yi + s * Y.col(i + 1);
Y.col(i + 1) = -s * Yi + c * Y.col(i + 1);
}
}
};
///
/// \ingroup LinearAlgebra
///
/// Perform the QR decomposition of a tridiagonal matrix, a special
/// case of upper Hessenberg matrices.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
///
template <typename Scalar = double>
class TridiagQR : public UpperHessenbergQR<Scalar>
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef typename Matrix::Index Index;
Vector m_T_diag; // diagonal elements of T
Vector m_T_lsub; // lower subdiagonal of T
Vector m_T_usub; // upper subdiagonal of T
Vector m_T_usub2; // 2nd upper subdiagonal of T
public:
///
/// Constructor to preallocate memory. Computation can
/// be performed later by calling the compute() method.
///
TridiagQR(Index size) :
UpperHessenbergQR<Scalar>(size)
{}
///
/// Constructor to create an object that performs and stores the
/// QR decomposition of a tridiagonal matrix `mat`, with an
/// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix
/// `mat`, and \f$s\f$ is the shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the main diagonal and the sub-diagonal parts of
/// the matrix are used.
///
TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) :
UpperHessenbergQR<Scalar>(mat.rows())
{
this->compute(mat, shift);
}
///
/// Conduct the QR factorization of a tridiagonal matrix with an
/// optional shift.
///
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
/// Only the main diagonal and the sub-diagonal parts of
/// the matrix are used.
///
void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0))
{
this->m_n = mat.rows();
if (this->m_n != mat.cols())
throw std::invalid_argument("TridiagQR: matrix must be square");
this->m_shift = shift;
m_T_diag.resize(this->m_n);
m_T_lsub.resize(this->m_n - 1);
m_T_usub.resize(this->m_n - 1);
m_T_usub2.resize(this->m_n - 2);
this->m_rot_cos.resize(this->m_n - 1);
this->m_rot_sin.resize(this->m_n - 1);
m_T_diag.array() = mat.diagonal().array() - this->m_shift;
m_T_lsub.noalias() = mat.diagonal(-1);
m_T_usub.noalias() = m_T_lsub;
// A number of pointers to avoid repeated address calculation
Scalar *c = this->m_rot_cos.data(), // pointer to the cosine vector
*s = this->m_rot_sin.data(), // pointer to the sine vector
r;
const Index n1 = this->m_n - 1;
for (Index i = 0; i < n1; i++)
{
// diag[i] == T[i, i]
// lsub[i] == T[i + 1, i]
// r = sqrt(T[i, i]^2 + T[i + 1, i]^2)
// c = T[i, i] / r, s = -T[i + 1, i] / r
this->compute_rotation(m_T_diag.coeff(i), m_T_lsub.coeff(i), r, *c, *s);
// For a complete QR decomposition,
// we first obtain the rotation matrix
// G = [ cos sin]
// [-sin cos]
// and then do T[i:(i + 1), i:(i + 2)] = G' * T[i:(i + 1), i:(i + 2)]
// Update T[i, i] and T[i + 1, i]
// The updated value of T[i, i] is known to be r
// The updated value of T[i + 1, i] is known to be 0
m_T_diag.coeffRef(i) = r;
m_T_lsub.coeffRef(i) = Scalar(0);
// Update T[i, i + 1] and T[i + 1, i + 1]
// usub[i] == T[i, i + 1]
// diag[i + 1] == T[i + 1, i + 1]
const Scalar tmp = m_T_usub.coeff(i);
m_T_usub.coeffRef(i) = (*c) * tmp - (*s) * m_T_diag.coeff(i + 1);
m_T_diag.coeffRef(i + 1) = (*s) * tmp + (*c) * m_T_diag.coeff(i + 1);
// Update T[i, i + 2] and T[i + 1, i + 2]
// usub2[i] == T[i, i + 2]
// usub[i + 1] == T[i + 1, i + 2]
if (i < n1 - 1)
{
m_T_usub2.coeffRef(i) = -(*s) * m_T_usub.coeff(i + 1);
m_T_usub.coeffRef(i + 1) *= (*c);
}
c++;
s++;
// If we do not need to calculate the R matrix, then
// only the cos and sin sequences are required.
// In such case we only update T[i + 1, (i + 1):(i + 2)]
// T[i + 1, i + 1] = c * T[i + 1, i + 1] + s * T[i, i + 1];
// T[i + 1, i + 2] *= c;
}
this->m_computed = true;
}
///
/// Return the \f$R\f$ matrix in the QR decomposition, which is an
/// upper triangular matrix.
///
/// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on
/// the template parameter `Scalar` defined.
///
Matrix matrix_R() const
{
if (!this->m_computed)
throw std::logic_error("TridiagQR: need to call compute() first");
Matrix R = Matrix::Zero(this->m_n, this->m_n);
R.diagonal().noalias() = m_T_diag;
R.diagonal(1).noalias() = m_T_usub;
R.diagonal(2).noalias() = m_T_usub2;
return R;
}
///
/// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`,
/// and \f$s\f$ is the shift. The result is a tridiagonal matrix.
///
/// \param dest The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`,
/// depending on the template parameter `Scalar` defined.
///
void matrix_QtHQ(Matrix& dest) const
{
if (!this->m_computed)
throw std::logic_error("TridiagQR: need to call compute() first");
// Make a copy of the R matrix
dest.resize(this->m_n, this->m_n);
dest.setZero();
dest.diagonal().noalias() = m_T_diag;
// The upper diagonal refers to m_T_usub
// The 2nd upper subdiagonal will be zero in RQ
// Compute the RQ matrix
// [m11 m12] points to RQ[i:(i+1), i:(i+1)]
// [0 m22]
//
// Gi = [ cos[i] sin[i]]
// [-sin[i] cos[i]]
const Index n1 = this->m_n - 1;
for (Index i = 0; i < n1; i++)
{
const Scalar c = this->m_rot_cos.coeff(i);
const Scalar s = this->m_rot_sin.coeff(i);
const Scalar m11 = dest.coeff(i, i),
m12 = m_T_usub.coeff(i),
m22 = m_T_diag.coeff(i + 1);
// Update the diagonal and the lower subdiagonal of dest
dest.coeffRef(i, i) = c * m11 - s * m12;
dest.coeffRef(i + 1, i) = -s * m22;
dest.coeffRef(i + 1, i + 1) = c * m22;
}
// Copy the lower subdiagonal to upper subdiagonal
dest.diagonal(1).noalias() = dest.diagonal(-1);
// Add the shift to the diagonal
dest.diagonal().array() += this->m_shift;
}
};
///
/// @}
///
} // namespace Spectra
#endif // UPPER_HESSENBERG_QR_H
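To make the shifted-QR step concrete (this is the operation that compress_H() in the Lanczos code above relies on), here is a hedged sketch using only the API defined in this header; the matrix and the shift value are made up.

// Function-body fragment; assumes <Eigen/Core> and the header above.
const int n = 6;
Eigen::MatrixXd T = Eigen::MatrixXd::Zero(n, n);
T.diagonal().setConstant(2.0);
T.diagonal(-1).setConstant(-1.0);
T.diagonal(1).setConstant(-1.0);

const double s = 0.5;                       // shift (illustrative value)
Spectra::TridiagQR<double> qr(T, s);        // factorizes T - sI = QR
Eigen::MatrixXd R = qr.matrix_R();          // upper triangular with three nonzero diagonals
Eigen::MatrixXd Tnext;
qr.matrix_QtHQ(Tnext);                      // Tnext = RQ + sI = Q'TQ, still tridiagonal
Eigen::MatrixXd Q = Eigen::MatrixXd::Identity(n, n);
qr.apply_YQ(Q);                             // accumulate Q from the stored Givens rotations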

View File

@ -0,0 +1,108 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_CHOLESKY_H
#define DENSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
template <typename Scalar, int Uplo = Eigen::Lower>
class DenseCholesky
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Matrix> MapConstMat;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
const Index m_n;
Eigen::LLT<Matrix, Uplo> m_decomp;
int m_info; // status of the decomposition
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseCholesky(ConstGenericMatrix& mat) :
m_n(mat.rows()), m_info(NOT_COMPUTED)
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseCholesky: matrix must be square");
m_decomp.compute(mat);
m_info = (m_decomp.info() == Eigen::Success) ?
SUCCESSFUL :
NUMERICAL_ISSUE;
}
///
/// Returns the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Returns the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * x_in
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixL().solve(x);
}
///
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L') * x_in
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixU().solve(x);
}
};
} // namespace Spectra
#endif // DENSE_CHOLESKY_H
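A hedged sketch of how the two triangular solves combine into an application of B^{-1}, roughly what the SymGEigsSolver Cholesky mode needs; the SPD matrix below is fabricated.

// Function-body fragment; assumes <Eigen/Core> and the header above.
const int n = 4;
Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
Eigen::MatrixXd B = M * M.transpose() + n * Eigen::MatrixXd::Identity(n, n);  // SPD by construction

Spectra::DenseCholesky<double> chol(B);
// chol.info() == Spectra::SUCCESSFUL when the LLT factorization succeeded
Eigen::VectorXd x = Eigen::VectorXd::Random(n), tmp(n), y(n);
chol.lower_triangular_solve(x.data(), tmp.data());  // tmp = L^{-1} x
chol.upper_triangular_solve(tmp.data(), y.data());  // y = (L')^{-1} tmp = B^{-1} x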

View File

@ -0,0 +1,102 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
#define DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/LU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the complex shift-solve operation on a general real matrix \f$A\f$,
/// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued
/// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the
/// GenEigsComplexShiftSolver eigen solver.
///
template <typename Scalar>
class DenseGenComplexShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
typedef Eigen::PartialPivLU<ComplexMatrix> ComplexSolver;
ConstGenericMatrix m_mat;
const Index m_n;
ComplexSolver m_solver;
ComplexVector m_x_cache;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseGenComplexShiftSolve(ConstGenericMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the complex shift \f$\sigma\f$.
///
/// \param sigmar Real part of \f$\sigma\f$.
/// \param sigmai Imaginary part of \f$\sigma\f$.
///
void set_shift(Scalar sigmar, Scalar sigmai)
{
m_solver.compute(m_mat.template cast<Complex>() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n));
m_x_cache.resize(m_n);
m_x_cache.setZero();
}
///
/// Perform the complex shift-solve operation
/// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = Re( inv(A - sigma * I) * x_in )
void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_x_cache.real() = MapConstVec(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(m_x_cache).real();
}
};
} // namespace Spectra
#endif // DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
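A hedged usage sketch; the matrix and the shift values are made up.

// Function-body fragment; assumes <Eigen/Core> and the header above.
const int n = 6;
Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
Spectra::DenseGenComplexShiftSolve<double> op(A);
op.set_shift(1.0, 0.5);                  // sigma = 1.0 + 0.5i (illustrative values)

Eigen::VectorXd x = Eigen::VectorXd::Random(n), y(n);
op.perform_op(x.data(), y.data());       // y = Re{ (A - sigma I)^{-1} x }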

View File

@ -0,0 +1,80 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_GEN_MAT_PROD_H
#define DENSE_GEN_MAT_PROD_H
#include <Eigen/Core>
namespace Spectra {
///
/// \defgroup MatOp Matrix Operations
///
/// Define matrix operations on existing matrix objects
///
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and
/// SymEigsSolver eigen solvers.
///
template <typename Scalar>
class DenseGenMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseGenMatProd(ConstGenericMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
};
} // namespace Spectra
#endif // DENSE_GEN_MAT_PROD_H
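A hedged sketch of the typical pairing with the GenEigsSolver mentioned above. GenEigsSolver and the LARGEST_MAGN selection rule are defined elsewhere in the bundled library, so the exact call pattern below is an assumption (typical Spectra 0.x usage); only DenseGenMatProd itself comes from this header.

// Function-body fragment; assumes <Eigen/Core>, this header, and the solver headers.
Eigen::MatrixXd A = Eigen::MatrixXd::Random(20, 20);
Spectra::DenseGenMatProd<double> op(A);
// Ask for the 3 eigenvalues of largest magnitude with a Krylov subspace of size 10
Spectra::GenEigsSolver<double, Spectra::LARGEST_MAGN, Spectra::DenseGenMatProd<double> > eigs(&op, 3, 10);
eigs.init();
eigs.compute();
if (eigs.info() == Spectra::SUCCESSFUL)
{
    Eigen::VectorXcd evals = eigs.eigenvalues();  // converged (complex) eigenvalues
}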

View File

@ -0,0 +1,88 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_GEN_REAL_SHIFT_SOLVE_H
#define DENSE_GEN_REAL_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/LU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a general real matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver.
///
template <typename Scalar>
class DenseGenRealShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const Index m_n;
Eigen::PartialPivLU<Matrix> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseGenRealShiftSolve(ConstGenericMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n));
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // DENSE_GEN_REAL_SHIFT_SOLVE_H
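A hedged direct-use sketch; the shift value is made up, and in practice the GenEigsRealShiftSolver mentioned above drives these calls.

// Function-body fragment; assumes <Eigen/Core> and the header above.
const int n = 6;
Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
Spectra::DenseGenRealShiftSolve<double> op(A);
op.set_shift(2.0);                       // factorizes A - 2I with partial-pivoting LU

Eigen::VectorXd x = Eigen::VectorXd::Random(n), y(n);
op.perform_op(x.data(), y.data());       // y = (A - 2I)^{-1} x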

View File

@ -0,0 +1,73 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_SYM_MAT_PROD_H
#define DENSE_SYM_MAT_PROD_H
#include <Eigen/Core>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
template <typename Scalar, int Uplo = Eigen::Lower>
class DenseSymMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseSymMatProd(ConstGenericMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
};
} // namespace Spectra
#endif // DENSE_SYM_MAT_PROD_H
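A hedged sketch of the pairing with the SymEigsSolver referenced in the comment above. SymEigsSolver and the LARGEST_ALGE rule live elsewhere in the bundled library, so their signatures here are assumptions; only DenseSymMatProd comes from this header.

// Function-body fragment; assumes <Eigen/Core>, this header, and the solver headers.
Eigen::MatrixXd M = Eigen::MatrixXd::Random(20, 20);
Eigen::MatrixXd A = M + M.transpose();   // symmetric test matrix
Spectra::DenseSymMatProd<double> op(A);
Spectra::SymEigsSolver<double, Spectra::LARGEST_ALGE, Spectra::DenseSymMatProd<double> > eigs(&op, 3, 10);
eigs.init();
eigs.compute();
if (eigs.info() == Spectra::SUCCESSFUL)
{
    Eigen::VectorXd evals = eigs.eigenvalues();  // real eigenvalues
}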

View File

@ -0,0 +1,92 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef DENSE_SYM_SHIFT_SOLVE_H
#define DENSE_SYM_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <stdexcept>
#include "../LinAlg/BKLDLT.h"
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
///
template <typename Scalar, int Uplo = Eigen::Lower>
class DenseSymShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_n;
BKLDLT<Scalar> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
DenseSymShiftSolve(ConstGenericMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("DenseSymShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
m_solver.compute(m_mat, Uplo, sigma);
if (m_solver.info() != SUCCESSFUL)
throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // DENSE_SYM_SHIFT_SOLVE_H
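A hedged direct-use sketch of the shift-solve backed by the BKLDLT factorization; the matrix and shift are made up.

// Function-body fragment; assumes <Eigen/Core> and the header above.
const int n = 6;
Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
Eigen::MatrixXd A = M + M.transpose();   // symmetric, not necessarily positive definite
Spectra::DenseSymShiftSolve<double> op(A);
op.set_shift(0.1);                       // shift must not coincide with an eigenvalue of A

Eigen::VectorXd x = Eigen::VectorXd::Random(n), y(n);
op.perform_op(x.data(), y.data());       // y = (A - 0.1 I)^{-1} x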

View File

@ -0,0 +1,109 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_CHOLESKY_H
#define SPARSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseCholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseCholesky
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
const Index m_n;
Eigen::SimplicialLLT<SparseMatrix, Uplo> m_decomp;
int m_info; // status of the decomposition
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseCholesky(ConstGenericSparseMatrix& mat) :
m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseCholesky: matrix must be square");
m_decomp.compute(mat);
m_info = (m_decomp.info() == Eigen::Success) ?
SUCCESSFUL :
NUMERICAL_ISSUE;
}
///
/// Returns the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Returns the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * x_in
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.permutationP() * x;
m_decomp.matrixL().solveInPlace(y);
}
///
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L') * x_in
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixU().solve(x);
y = m_decomp.permutationPinv() * y;
}
};
} // namespace Spectra
#endif // SPARSE_CHOLESKY_H
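A hedged sketch with a small sparse SPD matrix stored in its lower triangle (the values are made up); the two solves together apply B^{-1}.

// Function-body fragment; assumes <Eigen/Core>, <Eigen/SparseCore>, <vector> and the header above.
const int n = 10;
Eigen::SparseMatrix<double> B(n, n);
std::vector<Eigen::Triplet<double> > coeffs;
for (int i = 0; i < n; i++)
{
    coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i + 1 < n)
        coeffs.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));  // lower triangle only
}
B.setFromTriplets(coeffs.begin(), coeffs.end());

Spectra::SparseCholesky<double> chol(B);              // reads the lower triangle (Uplo = Eigen::Lower)
Eigen::VectorXd x = Eigen::VectorXd::Random(n), tmp(n), y(n);
chol.lower_triangular_solve(x.data(), tmp.data());
chol.upper_triangular_solve(tmp.data(), y.data());    // y = B^{-1} x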

View File

@ -0,0 +1,74 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_GEN_MAT_PROD_H
#define SPARSE_GEN_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver
/// eigen solvers.
///
template <typename Scalar, int Flags = 0, typename StorageIndex = int>
class SparseGenMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseGenMatProd(ConstGenericSparseMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
};
} // namespace Spectra
#endif // SPARSE_GEN_MAT_PROD_H
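A short hedged sketch of the sparse matrix-vector product; the matrix is made up.

// Function-body fragment; assumes <Eigen/Core>, <Eigen/SparseCore> and the header above.
const int n = 8;
Eigen::SparseMatrix<double> A(n, n);
A.setIdentity();
A.coeffRef(0, n - 1) = 2.0;              // make A non-symmetric

Spectra::SparseGenMatProd<double> op(A);
Eigen::VectorXd x = Eigen::VectorXd::Ones(n), y(n);
op.perform_op(x.data(), y.data());       // y = A * x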

View File

@ -0,0 +1,93 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_GEN_REAL_SHIFT_SOLVE_H
#define SPARSE_GEN_REAL_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a sparse real matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver.
///
template <typename Scalar, int Flags = 0, typename StorageIndex = int>
class SparseGenRealShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
const int m_n;
Eigen::SparseLU<SparseMatrix> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseGenRealShiftSolve(ConstGenericSparseMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
SparseMatrix I(m_n, m_n);
I.setIdentity();
m_solver.compute(m_mat - sigma * I);
if (m_solver.info() != Eigen::Success)
throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // SPARSE_GEN_REAL_SHIFT_SOLVE_H
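A hedged sketch of the sparse shift-solve (matrix and shift made up): set_shift factorizes A - sigma I with SparseLU, and perform_op applies the inverse.

// Function-body fragment; assumes <Eigen/Core>, <Eigen/SparseCore> and the header above.
const int n = 8;
Eigen::SparseMatrix<double> A(n, n);
A.setIdentity();
A.coeffRef(0, n - 1) = 2.0;

Spectra::SparseGenRealShiftSolve<double> op(A);
op.set_shift(3.0);                       // 3 is not an eigenvalue of A, so A - 3I is invertible
Eigen::VectorXd x = Eigen::VectorXd::Ones(n), y(n);
op.perform_op(x.data(), y.data());       // y = (A - 3I)^{-1} x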

View File

@ -0,0 +1,100 @@
// Copyright (C) 2017-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_REGULAR_INVERSE_H
#define SPARSE_REGULAR_INVERSE_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/IterativeLinearSolvers>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines matrix operations required by the generalized eigen solver
/// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$,
/// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation
/// solving operation \f$y=B^{-1}x\f$.
///
/// This class is intended to be used with the SymGEigsSolver generalized eigen solver
/// in the regular inverse mode.
///
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseRegularInverse
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
const int m_n;
Eigen::ConjugateGradient<SparseMatrix> m_cg;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseRegularInverse(ConstGenericSparseMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseRegularInverse: matrix must be square");
m_cg.compute(mat);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Perform the solving operation \f$y=B^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(B) * x_in
void solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_cg.solve(x);
}
///
/// Perform the matrix-vector multiplication operation \f$y=Bx\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = B * x_in
void mat_prod(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
};
} // namespace Spectra
#endif // SPARSE_REGULAR_INVERSE_H
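A hedged consistency sketch: applying B and then B^{-1} should recover the input vector up to the conjugate-gradient tolerance. The SPD matrix below is fabricated and stored with both triangles so that the internal ConjugateGradient sees the full matrix.

// Function-body fragment; assumes <Eigen/Core>, <Eigen/SparseCore>, <vector> and the header above.
const int n = 10;
Eigen::SparseMatrix<double> B(n, n);
std::vector<Eigen::Triplet<double> > coeffs;
for (int i = 0; i < n; i++)
{
    coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i + 1 < n)
    {
        coeffs.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
        coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
    }
}
B.setFromTriplets(coeffs.begin(), coeffs.end());

Spectra::SparseRegularInverse<double> op(B);
Eigen::VectorXd x = Eigen::VectorXd::Random(n), Bx(n), xrec(n);
op.mat_prod(x.data(), Bx.data());        // Bx   = B * x
op.solve(Bx.data(), xrec.data());        // xrec ~= x, solved iteratively with conjugate gradient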

View File

@ -0,0 +1,73 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_SYM_MAT_PROD_H
#define SPARSE_SYM_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseSymMatProd
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseSymMatProd(ConstGenericSparseMatrix& mat) :
m_mat(mat)
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
};
} // namespace Spectra
#endif // SPARSE_SYM_MAT_PROD_H

View File

@ -0,0 +1,95 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPARSE_SYM_SHIFT_SOLVE_H
#define SPARSE_SYM_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <stdexcept>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
///
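///
/// A usage sketch (an editorial illustration, not part of the upstream documentation;
/// it mirrors the dense shift-solve examples in SymEigsShiftSolver and assumes the same
/// include layout):
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymEigsShiftSolver.h>
/// #include <Spectra/MatOp/SparseSymShiftSolve.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
///     // A: sparse symmetric positive definite tridiagonal matrix
///     const int n = 100;
///     Eigen::SparseMatrix<double> A(n, n);
///     for(int i = 0; i < n; i++)
///     {
///         A.insert(i, i) = 2.0;
///         if(i > 0)     A.insert(i - 1, i) = -1.0;
///         if(i < n - 1) A.insert(i + 1, i) = -1.0;
///     }
///
///     // Find the three eigenvalues closest to the shift 0
///     SparseSymShiftSolve<double> op(A);
///     SymEigsShiftSolver< double, LARGEST_MAGN, SparseSymShiftSolve<double> > eigs(&op, 3, 6, 0.0);
///     eigs.init();
///     eigs.compute();
///     if(eigs.info() == SUCCESSFUL)
///         std::cout << "Eigenvalues found:\n" << eigs.eigenvalues() << std::endl;
///     return 0;
/// }
/// \endcode
///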
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
class SparseSymShiftSolve
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
ConstGenericSparseMatrix m_mat;
const int m_n;
Eigen::SparseLU<SparseMatrix> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
SparseSymShiftSolve(ConstGenericSparseMatrix& mat) :
m_mat(mat), m_n(mat.rows())
{
if (mat.rows() != mat.cols())
throw std::invalid_argument("SparseSymShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(Scalar sigma)
{
SparseMatrix mat = m_mat.template selfadjointView<Uplo>();
SparseMatrix identity(m_n, m_n);
identity.setIdentity();
mat = mat - sigma * identity;
m_solver.isSymmetric(true);
m_solver.compute(mat);
if (m_solver.info() != Eigen::Success)
throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // SPARSE_SYM_SHIFT_SOLVE_H

View File

@ -0,0 +1,150 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef ARNOLDI_OP_H
#define ARNOLDI_OP_H
#include <Eigen/Core>
#include <cmath> // std::sqrt
namespace Spectra {
///
/// \ingroup Internals
/// @{
///
///
/// \defgroup Operators Operators
///
/// Different types of operators.
///
///
/// \ingroup Operators
///
/// Operators used in the Arnoldi factorization.
///
template <typename Scalar, typename OpType, typename BOpType>
class ArnoldiOp
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
BOpType& m_Bop;
Vector m_cache;
public:
ArnoldiOp(OpType* op, BOpType* Bop) :
m_op(*op), m_Bop(*Bop), m_cache(op->rows())
{}
inline Index rows() const { return m_op.rows(); }
// In the generalized eigenvalue problem Ax = lambda*Bx, we define the inner product to be <x, y> = x'By.
// For regular eigenvalue problems, it is the usual inner product <x, y> = x'y
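// (Editor's note, added for context: with this B-inner product, operators such as
// B^{-1}A used in the regular inverse mode become self-adjoint, i.e.
// <B^{-1}Ax, y> = x'Ay = <x, B^{-1}Ay>, which is what allows the symmetric Lanczos
// process to be applied to the generalized problem.)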
// Compute <x, y> = x'By
// x and y are two vectors
template <typename Arg1, typename Arg2>
Scalar inner_product(const Arg1& x, const Arg2& y)
{
m_Bop.mat_prod(y.data(), m_cache.data());
return x.dot(m_cache);
}
// Compute res = <X, y> = X'By
// X is a matrix, y is a vector, res is a vector
template <typename Arg1, typename Arg2>
void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res)
{
m_Bop.mat_prod(y.data(), m_cache.data());
res.noalias() = x.transpose() * m_cache;
}
// B-norm of a vector, ||x||_B = sqrt(x'Bx)
template <typename Arg>
Scalar norm(const Arg& x)
{
using std::sqrt;
return sqrt(inner_product<Arg, Arg>(x, x));
}
// The "A" operator to generate the Krylov subspace
inline void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_op.perform_op(x_in, y_out);
}
};
///
/// \ingroup Operators
///
/// Placeholder for the B-operator when \f$B = I\f$.
///
class IdentityBOp
{};
///
/// \ingroup Operators
///
/// Partial specialization for the case \f$B = I\f$.
///
template <typename Scalar, typename OpType>
class ArnoldiOp<Scalar, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
public:
ArnoldiOp<Scalar, OpType, IdentityBOp>(OpType* op, IdentityBOp* /*Bop*/) :
m_op(*op)
{}
inline Index rows() const { return m_op.rows(); }
// Compute <x, y> = x'y
// x and y are two vectors
template <typename Arg1, typename Arg2>
Scalar inner_product(const Arg1& x, const Arg2& y) const
{
return x.dot(y);
}
// Compute res = <X, y> = X'y
// X is a matrix, y is a vector, res is a vector
template <typename Arg1, typename Arg2>
void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res) const
{
res.noalias() = x.transpose() * y;
}
// B-norm of a vector. For regular eigenvalue problems it is simply the L2 norm
template <typename Arg>
Scalar norm(const Arg& x)
{
return x.norm();
}
// The "A" operator to generate the Krylov subspace
inline void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_op.perform_op(x_in, y_out);
}
};
///
/// @}
///
} // namespace Spectra
#endif // ARNOLDI_OP_H

View File

@ -0,0 +1,75 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_GEIGS_CHOLESKY_OP_H
#define SYM_GEIGS_CHOLESKY_OP_H
#include <Eigen/Core>
#include "../DenseSymMatProd.h"
#include "../DenseCholesky.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any
/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$.
/// This class is intended for internal use.
///
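/// (Editor's note, a brief derivation for context: substituting \f$B=LL'\f$ and
/// \f$x=(L')^{-1}y\f$ into \f$Ax=\lambda Bx\f$ gives
/// \f[L^{-1}A(L')^{-1}y=\lambda y,\f]
/// so applying a standard symmetric solver to this operator yields the generalized
/// eigenvalues, and the generalized eigenvectors are recovered as \f$x=(L')^{-1}y\f$.)
///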
template <typename Scalar = double,
typename OpType = DenseSymMatProd<double>,
typename BOpType = DenseCholesky<double> >
class SymGEigsCholeskyOp
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
BOpType& m_Bop;
Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object.
/// \param Bop Pointer to the \f$B\f$ matrix operation object.
///
SymGEigsCholeskyOp(OpType& op, BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * A * inv(L') * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_Bop.upper_triangular_solve(x_in, y_out);
m_op.perform_op(y_out, m_cache.data());
m_Bop.lower_triangular_solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SYM_GEIGS_CHOLESKY_OP_H

View File

@ -0,0 +1,72 @@
// Copyright (C) 2017-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_GEIGS_REG_INV_OP_H
#define SYM_GEIGS_REG_INV_OP_H
#include <Eigen/Core>
#include "../SparseSymMatProd.h"
#include "../SparseRegularInverse.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// regular inverse mode. This class is intended for internal use.
///
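/// (Editor's note, added for context: the problem \f$Ax=\lambda Bx\f$ is equivalent to
/// \f[B^{-1}Ax=\lambda x,\f]
/// and although \f$B^{-1}A\f$ is not symmetric in the usual sense, it is self-adjoint
/// with respect to the inner product \f$\langle x,y\rangle=x'By\f$ used in ArnoldiOp,
/// so the symmetric Lanczos machinery still applies.)
///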
template <typename Scalar = double,
typename OpType = SparseSymMatProd<double>,
typename BOpType = SparseRegularInverse<double> >
class SymGEigsRegInvOp
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
OpType& m_op;
BOpType& m_Bop;
Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object.
/// \param Bop Pointer to the \f$B\f$ matrix operation object.
///
SymGEigsRegInvOp(OpType& op, BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=B^{-1}Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(B) * A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
m_op.perform_op(x_in, m_cache.data());
m_Bop.solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SYM_GEIGS_REG_INV_OP_H

448
gtsam/3rdparty/Spectra/SymEigsBase.h vendored Normal file
View File

@ -0,0 +1,448 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_EIGS_BASE_H
#define SYM_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow, std::sqrt
#include <algorithm> // std::min, std::copy
#include <stdexcept> // std::invalid_argument
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/TridiagEigen.h"
#include "LinAlg/Lanczos.h"
namespace Spectra {
///
/// \defgroup EigenSolver Eigen Solvers
///
/// Eigen solvers for different types of problems.
///
///
/// \ingroup EigenSolver
///
/// This is the base class for symmetric eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as SymEigsSolver and SymEigsShiftSolver.
///
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class SymEigsBase
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
typedef Eigen::Array<bool, Eigen::Dynamic, 1> BoolArray;
typedef Eigen::Map<Matrix> MapMat;
typedef Eigen::Map<Vector> MapVec;
typedef Eigen::Map<const Vector> MapConstVec;
typedef ArnoldiOp<Scalar, OpType, BOpType> ArnoldiOpType;
typedef Lanczos<Scalar, ArnoldiOpType> LanczosFac;
protected:
// clang-format off
OpType* m_op; // object to conduct matrix operation,
// e.g. matrix-vector product
const Index m_n; // dimension of matrix A
const Index m_nev; // number of eigenvalues requested
const Index m_ncv; // dimension of Krylov subspace in the Lanczos method
Index m_nmatop; // number of matrix operations called
Index m_niter; // number of restarting iterations
LanczosFac m_fac; // Lanczos factorization
Vector m_ritz_val; // Ritz values
private:
Matrix m_ritz_vec; // Ritz vectors
Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
int m_info; // status of the computation
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
const Scalar m_eps23; // m_eps^(2/3), used to test the convergence
// clang-format on
// Implicitly restarted Lanczos factorization
void restart(Index k)
{
if (k >= m_ncv)
return;
TridiagQR<Scalar> decomp(m_ncv);
Matrix Q = Matrix::Identity(m_ncv, m_ncv);
for (Index i = k; i < m_ncv; i++)
{
// QR decomposition of H-mu*I, mu is the shift
decomp.compute(m_fac.matrix_H(), m_ritz_val[i]);
// Q -> Q * Qi
decomp.apply_YQ(Q);
// H -> Q'HQ
// Since QR = H - mu * I, we have H = QR + mu * I
// and therefore Q'HQ = RQ + mu * I
m_fac.compress_H(decomp);
}
m_fac.compress_V(Q);
m_fac.factorize_from(k, m_ncv, m_nmatop);
retrieve_ritzpair();
}
// Calculates the number of converged Ritz values
Index num_converged(Scalar tol)
{
// thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23);
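// (Editor's note) resid below is the classical Lanczos residual estimate: for a Ritz
// pair (theta, V*y) of the factorization A*V = V*H + f*e', the residual norm
// ||A*(V*y) - theta*(V*y)|| equals |e'y| * ||f|| (in the B-norm for generalized
// problems), and m_ritz_est stores these last elements e'y of the eigenvectors of H.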
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
// Converged "wanted" Ritz values
m_ritz_conv = (resid < thresh);
return m_ritz_conv.cast<Index>().sum();
}
// Returns the adjusted nev for restarting
Index nev_adjusted(Index nconv)
{
using std::abs;
Index nev_new = m_nev;
for (Index i = m_nev; i < m_ncv; i++)
if (abs(m_ritz_est[i]) < m_near_0)
nev_new++;
// Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK
nev_new += std::min(nconv, (m_ncv - nev_new) / 2);
if (nev_new == 1 && m_ncv >= 6)
nev_new = m_ncv / 2;
else if (nev_new == 1 && m_ncv > 2)
nev_new = 2;
if (nev_new > m_ncv - 1)
nev_new = m_ncv - 1;
return nev_new;
}
// Retrieves and sorts Ritz values and Ritz vectors
void retrieve_ritzpair()
{
TridiagEigen<Scalar> decomp(m_fac.matrix_H());
const Vector& evals = decomp.eigenvalues();
const Matrix& evecs = decomp.eigenvectors();
SortEigenvalue<Scalar, SelectionRule> sorting(evals.data(), evals.size());
std::vector<int> ind = sorting.index();
// For BOTH_ENDS, the eigenvalues are sorted according
// to the LARGEST_ALGE rule, so we need to move those smallest
// values to the left
// The order would be
// Largest => Smallest => 2nd largest => 2nd smallest => ...
// We keep this order since the first k values will always be
// the wanted collection, no matter whether k is nev_adjusted (used in restart())
// or nev (used in sort_ritzpair())
if (SelectionRule == BOTH_ENDS)
{
std::vector<int> ind_copy(ind);
for (Index i = 0; i < m_ncv; i++)
{
// If i is even, pick values from the left (large values)
// If i is odd, pick values from the right (small values)
if (i % 2 == 0)
ind[i] = ind_copy[i / 2];
else
ind[i] = ind_copy[m_ncv - 1 - i / 2];
}
}
// Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
for (Index i = 0; i < m_ncv; i++)
{
m_ritz_val[i] = evals[ind[i]];
m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
}
for (Index i = 0; i < m_nev; i++)
{
m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
}
}
protected:
// Sorts the first nev Ritz pairs in the specified order
// This is used to return the final results
virtual void sort_ritzpair(int sort_rule)
{
// First make sure that we have a valid index vector
SortEigenvalue<Scalar, LARGEST_ALGE> sorting(m_ritz_val.data(), m_nev);
std::vector<int> ind = sorting.index();
switch (sort_rule)
{
case LARGEST_ALGE:
break;
case LARGEST_MAGN:
{
SortEigenvalue<Scalar, LARGEST_MAGN> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_ALGE:
{
SortEigenvalue<Scalar, SMALLEST_ALGE> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
case SMALLEST_MAGN:
{
SortEigenvalue<Scalar, SMALLEST_MAGN> sorting(m_ritz_val.data(), m_nev);
ind = sorting.index();
break;
}
default:
throw std::invalid_argument("unsupported sorting rule");
}
Vector new_ritz_val(m_ncv);
Matrix new_ritz_vec(m_ncv, m_nev);
BoolArray new_ritz_conv(m_nev);
for (Index i = 0; i < m_nev; i++)
{
new_ritz_val[i] = m_ritz_val[ind[i]];
new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
new_ritz_conv[i] = m_ritz_conv[ind[i]];
}
m_ritz_val.swap(new_ritz_val);
m_ritz_vec.swap(new_ritz_vec);
m_ritz_conv.swap(new_ritz_conv);
}
public:
/// \cond
SymEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) :
m_op(op),
m_n(m_op->rows()),
m_nev(nev),
m_ncv(ncv > m_n ? m_n : ncv),
m_nmatop(0),
m_niter(0),
m_fac(ArnoldiOpType(op, Bop), m_ncv),
m_info(NOT_COMPUTED),
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3))
{
if (nev < 1 || nev > m_n - 1)
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
if (ncv <= nev || ncv > m_n)
throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix");
}
///
/// Virtual destructor
///
virtual ~SymEigsBase() {}
/// \endcond
///
/// Initializes the solver by providing an initial residual vector.
///
/// \param init_resid Pointer to the initial residual vector.
///
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
/// to find eigenvalues. This function allows the user to provide the initial
/// residual vector.
///
void init(const Scalar* init_resid)
{
// Reset all matrices/vectors to zero
m_ritz_val.resize(m_ncv);
m_ritz_vec.resize(m_ncv, m_nev);
m_ritz_est.resize(m_ncv);
m_ritz_conv.resize(m_nev);
m_ritz_val.setZero();
m_ritz_vec.setZero();
m_ritz_est.setZero();
m_ritz_conv.setZero();
m_nmatop = 0;
m_niter = 0;
// Initialize the Lanczos factorization
MapConstVec v0(init_resid, m_n);
m_fac.init(v0, m_nmatop);
}
///
/// Initializes the solver by providing a random initial residual vector.
///
/// This overloaded function generates a random initial residual vector
/// (with a fixed random seed) for the algorithm. Elements in the vector
/// are independently drawn from the Uniform(-0.5, 0.5) distribution.
///
void init()
{
SimpleRandom<Scalar> rng(0);
Vector init_resid = rng.random_vec(m_n);
init(init_resid.data());
}
///
/// Conducts the major computation procedure.
///
/// \param maxit Maximum number of iterations allowed in the algorithm.
/// \param tol Precision parameter for the calculated eigenvalues.
/// \param sort_rule Rule to sort the eigenvalues and eigenvectors.
/// Supported values are
/// `Spectra::LARGEST_ALGE`, `Spectra::LARGEST_MAGN`,
/// `Spectra::SMALLEST_ALGE` and `Spectra::SMALLEST_MAGN`,
/// for example `LARGEST_ALGE` indicates that largest eigenvalues
/// come first. Note that this argument is only used to
/// **sort** the final result, and the **selection** rule
/// (e.g. selecting the largest or smallest eigenvalues in the
/// full spectrum) is specified by the template parameter
/// `SelectionRule` of SymEigsSolver.
///
/// \return Number of converged eigenvalues.
///
Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_ALGE)
{
// The m-step Lanczos factorization
m_fac.factorize_from(1, m_ncv, m_nmatop);
retrieve_ritzpair();
// Restarting
Index i, nconv = 0, nev_adj;
for (i = 0; i < maxit; i++)
{
nconv = num_converged(tol);
if (nconv >= m_nev)
break;
nev_adj = nev_adjusted(nconv);
restart(nev_adj);
}
// Sorting results
sort_ritzpair(sort_rule);
m_niter += i + 1;
m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING;
return std::min(m_nev, nconv);
}
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
int info() const { return m_info; }
///
/// Returns the number of iterations used in the computation.
///
Index num_iterations() const { return m_niter; }
///
/// Returns the number of matrix operations used in the computation.
///
Index num_operations() const { return m_nmatop; }
///
/// Returns the converged eigenvalues.
///
/// \return A vector containing the eigenvalues.
/// Returned vector type will be `Eigen::Matrix<Scalar, Eigen::Dynamic, 1>`, i.e. a column
/// vector whose element type is given by the template parameter `Scalar`.
///
Vector eigenvalues() const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
Vector res(nconv);
if (!nconv)
return res;
Index j = 0;
for (Index i = 0; i < m_nev; i++)
{
if (m_ritz_conv[i])
{
res[j] = m_ritz_val[i];
j++;
}
}
return res;
}
///
/// Returns the eigenvectors associated with the converged eigenvalues.
///
/// \param nvec The number of eigenvectors to return.
///
/// \return A matrix containing the eigenvectors.
/// Returned matrix type will be `Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>`,
/// whose element type is given by the template parameter `Scalar`.
///
virtual Matrix eigenvectors(Index nvec) const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
nvec = std::min(nvec, nconv);
Matrix res(m_n, nvec);
if (!nvec)
return res;
Matrix ritz_vec_conv(m_ncv, nvec);
Index j = 0;
for (Index i = 0; i < m_nev && j < nvec; i++)
{
if (m_ritz_conv[i])
{
ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
j++;
}
}
res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
return res;
}
///
/// Returns all converged eigenvectors.
///
virtual Matrix eigenvectors() const
{
return eigenvectors(m_nev);
}
};
} // namespace Spectra
#endif // SYM_EIGS_BASE_H

View File

@ -0,0 +1,203 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_EIGS_SHIFT_SOLVER_H
#define SYM_EIGS_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices using
/// the **shift-and-invert mode**. The background information of the symmetric
/// eigen solver is documented in the SymEigsSolver class. Here we focus on
/// explaining the shift-and-invert mode.
///
/// The shift-and-invert mode is based on the following fact:
/// If \f$\lambda\f$ and \f$x\f$ are an eigenvalue-eigenvector pair of the
/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$,
/// we have
/// \f[(A-\sigma I)^{-1}x=\nu x\f]
/// where
/// \f[\nu=\frac{1}{\lambda-\sigma}\f]
/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix
/// \f$(A-\sigma I)^{-1}\f$.
///
/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$
/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired
/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting
/// that \f$\lambda=\sigma+\nu^{-1}\f$.
///
/// The reason why we need this type of manipulation is that
/// the algorithm of **Spectra** (and also **ARPACK**)
/// is good at finding eigenvalues with large magnitude, but may fail to find
/// eigenvalues that are close to zero. However, if we really need them, we
/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then
/// transform back to \f$\lambda\f$, since in this case the largest values of \f$\nu\f$
/// correspond to the smallest values of \f$\lambda\f$ in magnitude.
///
/// To summarize, in the shift-and-invert mode, the selection rule will apply to
/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule
/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of
/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues()
/// method will always return the eigenvalues in the original problem (i.e.,
/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the
/// same for both the original problem and the shifted-and-inverted problem.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the shifted-and-inverted eigenvalues.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymShiftSolve and
/// SparseSymShiftSolve, or define their
/// own that implements all the public member functions as in
/// DenseSymShiftSolve.
///
/// Below is an example that illustrates the use of the shift-and-invert mode:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// // <Spectra/MatOp/DenseSymShiftSolve.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A size-10 diagonal matrix with elements 1, 2, ..., 10
/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10);
/// for(int i = 0; i < M.rows(); i++)
/// M(i, i) = i + 1;
///
/// // Construct matrix operation object using the wrapper class
/// DenseSymShiftSolve<double> op(M);
///
/// // Construct eigen solver object with shift 0
/// // This will find eigenvalues that are closest to 0
/// SymEigsShiftSolver< double, LARGEST_MAGN,
/// DenseSymShiftSolve<double> > eigs(&op, 3, 6, 0.0);
///
/// eigs.init();
/// eigs.compute();
/// if(eigs.info() == SUCCESSFUL)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (3.0, 2.0, 1.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
/// Also an example for user-supplied matrix shift-solve operation class:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTenShiftSolve
/// {
/// private:
/// double sigma_;
/// public:
/// int rows() { return 10; }
/// int cols() { return 10; }
/// void set_shift(double sigma) { sigma_ = sigma; }
/// // y_out = inv(A - sigma * I) * x_in
/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...)
/// void perform_op(double *x_in, double *y_out)
/// {
/// for(int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] / (i + 1 - sigma_);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTenShiftSolve op;
/// // Find three eigenvalues that are closest to 3.14
/// SymEigsShiftSolver<double, LARGEST_MAGN,
/// MyDiagonalTenShiftSolve> eigs(&op, 3, 6, 3.14);
/// eigs.init();
/// eigs.compute();
/// if(eigs.info() == SUCCESSFUL)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (4.0, 3.0, 2.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseSymShiftSolve<double> >
class SymEigsShiftSolver : public SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
const Scalar m_sigma;
// First transform back the Ritz values, and then sort
void sort_ritzpair(int sort_rule)
{
Array m_ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma;
this->m_ritz_val.head(this->m_nev) = m_ritz_val_org;
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
}
public:
///
/// Constructor to create an eigen solver object using the shift-and-invert mode.
///
/// \param op Pointer to the matrix operation object, which should implement
/// the shift-solve operation of \f$A\f$: calculating
/// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseSymShiftSolve, or
/// define their own that implements all the public member functions
/// as in DenseSymShiftSolve.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
/// \param sigma The value of the shift.
///
SymEigsShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) :
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
m_sigma(sigma)
{
this->m_op->set_shift(m_sigma);
}
};
} // namespace Spectra
#endif // SYM_EIGS_SHIFT_SOLVER_H

171
gtsam/3rdparty/Spectra/SymEigsSolver.h vendored Normal file
View File

@ -0,0 +1,171 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_EIGS_SOLVER_H
#define SYM_EIGS_SOLVER_H
#include <Eigen/Core>
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric.
///
/// **Spectra** is designed to calculate a specified number (\f$k\f$)
/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much
/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues
/// and eigenvectors are computed.
///
/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires
/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of
/// this solver need to supply a class that computes the result of \f$Av\f$
/// for any given vector \f$v\f$. The name of this class should be given to
/// the template parameter `OpType`, and an instance of this class should be passed to
/// the constructor of SymEigsSolver.
///
/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**,
/// for example `Eigen::MatrixXd`, then there is an easy way to construct such
/// matrix operation class, by using the built-in wrapper class DenseSymMatProd
/// which wraps an existing matrix object in **Eigen**. This is also the
/// default template parameter for SymEigsSolver. For sparse matrices, the
/// wrapper class SparseSymMatProd can be used similarly.
///
/// If the users need to define their own matrix-vector multiplication operation
/// class, it should implement all the public member functions as in DenseSymMatProd.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their
/// own that implements all the public member functions as in
/// DenseSymMatProd.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsSolver.h>
/// // <Spectra/MatOp/DenseSymMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
/// Eigen::MatrixXd M = A + A.transpose();
///
/// // Construct matrix operation object using the wrapper class DenseSymMatProd
/// DenseSymMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// SymEigsSolver< double, LARGEST_ALGE, DenseSymMatProd<double> > eigs(&op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// if(eigs.info() == SUCCESSFUL)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And here is an example for user-supplied matrix operation class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTen
/// {
/// public:
/// int rows() { return 10; }
/// int cols() { return 10; }
/// // y_out = M * x_in
/// void perform_op(double *x_in, double *y_out)
/// {
/// for(int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] * (i + 1);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTen op;
/// SymEigsSolver<double, LARGEST_ALGE, MyDiagonalTen> eigs(&op, 3, 6);
/// eigs.init();
/// eigs.compute();
/// if(eigs.info() == SUCCESSFUL)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (10, 9, 8)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename Scalar = double,
int SelectionRule = LARGEST_MAGN,
typename OpType = DenseSymMatProd<double> >
class SymEigsSolver : public SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
{
private:
typedef Eigen::Index Index;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the matrix operation object, which should implement
/// the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseSymMatProd, or
/// define their own that implements all the public member functions
/// as in DenseSymMatProd.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
///
SymEigsSolver(OpType* op, Index nev, Index ncv) :
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv)
{}
};
} // namespace Spectra
#endif // SYM_EIGS_SOLVER_H

326
gtsam/3rdparty/Spectra/SymGEigsSolver.h vendored Normal file
View File

@ -0,0 +1,326 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SYM_GEIGS_SOLVER_H
#define SYM_GEIGS_SOLVER_H
#include "SymEigsBase.h"
#include "Util/GEigsMode.h"
#include "MatOp/internal/SymGEigsCholeskyOp.h"
#include "MatOp/internal/SymGEigsRegInvOp.h"
namespace Spectra {
///
/// \defgroup GEigenSolver Generalized Eigen Solvers
///
/// Generalized eigen solvers for different types of problems.
///
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and
/// \f$B\f$ is positive definite.
///
/// There are two modes of this solver, specified by the template parameter
/// GEigsMode. See the pages for the specialized classes for details.
/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky
/// decomposition, which is the preferred mode when the decomposition is
/// available. (This can be easily done in Eigen using the dense or sparse
/// Cholesky solver.)
/// See \ref SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY> "SymGEigsSolver (Cholesky mode)" for more details.
/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the
/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be
/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or
/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition.
/// See \ref SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_REGULAR_INVERSE> "SymGEigsSolver (Regular inverse mode)" for more details.
// Empty class template
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType,
int GEigsMode>
class SymGEigsSolver
{};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky
/// decomposition \f$B=LL'\f$.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower
/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the DenseCholesky or SparseCholesky
/// classes. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their
/// own that implements all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseCholesky and
/// SparseCholesky, or define their
/// own that implements all the public member functions as in
/// DenseCholesky.
/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEIGS_CHOLESKY.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Eigen/Eigenvalues>
/// #include <Spectra/SymGEigsSolver.h>
/// #include <Spectra/MatOp/DenseSymMatProd.h>
/// #include <Spectra/MatOp/SparseCholesky.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x
/// const int n = 100;
///
/// // Define the A matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd A = M + M.transpose();
///
/// // Define the B matrix, a tridiagonal matrix with 2 on the diagonal and 1 on the sub- and super-diagonals
/// Eigen::SparseMatrix<double> B(n, n);
/// B.reserve(Eigen::VectorXi::Constant(n, 3));
/// for(int i = 0; i < n; i++)
/// {
/// B.insert(i, i) = 2.0;
/// if(i > 0)
/// B.insert(i - 1, i) = 1.0;
/// if(i < n - 1)
/// B.insert(i + 1, i) = 1.0;
/// }
///
/// // Construct matrix operation object using the wrapper classes
/// DenseSymMatProd<double> op(A);
/// SparseCholesky<double> Bop(B);
///
/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues
/// SymGEigsSolver<double, LARGEST_ALGE, DenseSymMatProd<double>, SparseCholesky<double>, GEIGS_CHOLESKY>
/// geigs(&op, &Bop, 3, 6);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute();
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if(geigs.info() == SUCCESSFUL)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// // Verify results using the generalized eigen solver in Eigen
/// Eigen::MatrixXd Bdense = B;
/// Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, Bdense);
///
/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl;
/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for GEigsMode = GEIGS_CHOLESKY
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY> :
public SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
BOpType* m_Bop;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object. It
/// should implement the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper classes such as DenseSymMatProd, or
/// define their own that implements all the public member functions
/// as in DenseSymMatProd.
/// \param Bop Pointer to the \f$B\f$ matrix operation object. It
/// represents a Cholesky decomposition of \f$B\f$, and should
/// implement the lower and upper triangular solving operations:
/// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector
/// \f$v\f$, where \f$LL'=B\f$. Users could either
/// create the object from the wrapper classes such as DenseCholesky, or
/// define their own that implements all the public member functions
/// as in DenseCholesky.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
///
SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) :
SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>(
new SymGEigsCholeskyOp<Scalar, OpType, BOpType>(*op, *Bop), NULL, nev, ncv),
m_Bop(Bop)
{}
/// \cond
~SymGEigsSolver()
{
// m_op contains the constructed SymGEigsCholeskyOp object
delete this->m_op;
}
Matrix eigenvectors(Index nvec) const
{
Matrix res = SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>::eigenvectors(nvec);
Vector tmp(res.rows());
const Index nconv = res.cols();
for (Index i = 0; i < nconv; i++)
{
m_Bop->upper_triangular_solve(&res(0, i), tmp.data());
res.col(i).noalias() = tmp;
}
return res;
}
Matrix eigenvectors() const
{
return SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY>::eigenvectors(this->m_nev);
}
/// \endcond
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations
/// defined below.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the
/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the SparseRegularInverse class. There is no
/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode
/// is always preferred. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
/// \tparam Scalar The element type of the matrix.
/// Currently supported types are `float`, `double` and `long double`.
/// \tparam SelectionRule An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `LARGEST_MAGN`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their
/// own that implements all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper class SparseRegularInverse, or define their
/// own that implements all the public member functions as in
/// SparseRegularInverse.
/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEIGS_REGULAR_INVERSE.
///
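/// Below is a usage sketch for the regular inverse mode (an editorial illustration, not
/// part of the upstream Spectra documentation; it parallels the Cholesky-mode example
/// above and assumes the same include layout):
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymGEigsSolver.h>
/// #include <Spectra/MatOp/SparseSymMatProd.h>
/// #include <Spectra/MatOp/SparseRegularInverse.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
///     // Solve A * x = lambda * B * x with sparse A and B
///     const int n = 100;
///     Eigen::SparseMatrix<double> A(n, n), B(n, n);
///     for(int i = 0; i < n; i++)
///     {
///         // A: tridiagonal with 2 on the diagonal and -1 off the diagonal
///         // B: tridiagonal with 2 on the diagonal and 1 off the diagonal (positive definite)
///         A.insert(i, i) = 2.0;
///         B.insert(i, i) = 2.0;
///         if(i > 0)     { A.insert(i - 1, i) = -1.0; B.insert(i - 1, i) = 1.0; }
///         if(i < n - 1) { A.insert(i + 1, i) = -1.0; B.insert(i + 1, i) = 1.0; }
///     }
///
///     // Matrix operation objects: A*v for A, and B*v / B^{-1}*v for B
///     SparseSymMatProd<double> op(A);
///     SparseRegularInverse<double> Bop(B);
///
///     // Request the three largest generalized eigenvalues
///     SymGEigsSolver<double, LARGEST_ALGE, SparseSymMatProd<double>,
///                    SparseRegularInverse<double>, GEIGS_REGULAR_INVERSE>
///         geigs(&op, &Bop, 3, 6);
///
///     geigs.init();
///     geigs.compute();
///     if(geigs.info() == SUCCESSFUL)
///         std::cout << "Generalized eigenvalues found:\n" << geigs.eigenvalues() << std::endl;
///     return 0;
/// }
/// \endcode
///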
// Partial specialization for GEigsMode = GEIGS_REGULAR_INVERSE
template <typename Scalar,
int SelectionRule,
typename OpType,
typename BOpType>
class SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_REGULAR_INVERSE> :
public SymEigsBase<Scalar, SelectionRule, SymGEigsRegInvOp<Scalar, OpType, BOpType>, BOpType>
{
private:
typedef Eigen::Index Index;
public:
///
/// Constructor to create a solver object.
///
/// \param op Pointer to the \f$A\f$ matrix operation object. It
/// should implement the matrix-vector multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper classes such as DenseSymMatProd, or
/// define their own that implements all the public member functions
/// as in DenseSymMatProd.
/// \param Bop Pointer to the \f$B\f$ matrix operation object. It should
/// implement the multiplication operation \f$Bv\f$ and the linear equation
/// solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class SparseRegularInverse, or
/// define their own that implements all the public member functions
/// as in SparseRegularInverse.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of the matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
///
SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) :
SymEigsBase<Scalar, SelectionRule, SymGEigsRegInvOp<Scalar, OpType, BOpType>, BOpType>(
new SymGEigsRegInvOp<Scalar, OpType, BOpType>(*op, *Bop), Bop, nev, ncv)
{}
/// \cond
~SymGEigsSolver()
{
// m_op contains the constructed SymGEigsRegInvOp object
delete this->m_op;
}
/// \endcond
};
} // namespace Spectra
#endif // SYM_GEIGS_SOLVER_H

35
gtsam/3rdparty/Spectra/Util/CompInfo.h vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef COMP_INFO_H
#define COMP_INFO_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to report the status of computation.
///
enum COMPUTATION_INFO
{
SUCCESSFUL = 0, ///< Computation was successful.
NOT_COMPUTED, ///< Used in eigen solvers, indicating that computation
///< has not been conducted. Users should call
///< the `compute()` member function of solvers.
NOT_CONVERGING, ///< Used in eigen solvers, indicating that some eigenvalues
///< did not converge. The `compute()`
///< function returns the number of converged eigenvalues.
NUMERICAL_ISSUE ///< Used in Cholesky decomposition, indicating that the
///< matrix is not positive definite.
};
} // namespace Spectra
#endif // COMP_INFO_H

32
gtsam/3rdparty/Spectra/Util/GEigsMode.h vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef GEIGS_MODE_H
#define GEIGS_MODE_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to specify the mode of generalized eigenvalue solver.
///
enum GEIGS_MODE
{
GEIGS_CHOLESKY = 0, ///< Using Cholesky decomposition to solve generalized eigenvalues.
GEIGS_REGULAR_INVERSE, ///< Regular inverse mode for generalized eigenvalue solver.
GEIGS_SHIFT_INVERT, ///< Shift-and-invert mode for generalized eigenvalue solver.
GEIGS_BUCKLING, ///< Buckling mode for generalized eigenvalue solver.
GEIGS_CAYLEY ///< Cayley transformation mode for generalized eigenvalue solver.
};
} // namespace Spectra
#endif // GEIGS_MODE_H

View File

@ -0,0 +1,275 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SELECTION_RULE_H
#define SELECTION_RULE_H
#include <vector> // std::vector
#include <cmath> // std::abs
#include <algorithm> // std::sort
#include <complex> // std::complex
#include <utility> // std::pair
#include <stdexcept> // std::invalid_argument
namespace Spectra {
///
/// \defgroup Enumerations
///
/// Enumeration types for the selection rule of eigenvalues.
///
///
/// \ingroup Enumerations
///
/// The enumeration of selection rules of desired eigenvalues.
///
enum SELECT_EIGENVALUE
{
LARGEST_MAGN = 0, ///< Select eigenvalues with largest magnitude. Magnitude
///< means the absolute value for real numbers and norm for
///< complex numbers. Applies to both symmetric and general
///< eigen solvers.
LARGEST_REAL, ///< Select eigenvalues with largest real part. Only for general eigen solvers.
LARGEST_IMAG, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers.
LARGEST_ALGE, ///< Select eigenvalues with largest algebraic value, considering
///< any negative sign. Only for symmetric eigen solvers.
SMALLEST_MAGN, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general
///< eigen solvers.
SMALLEST_REAL, ///< Select eigenvalues with smallest real part. Only for general eigen solvers.
SMALLEST_IMAG, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers.
SMALLEST_ALGE, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers.
BOTH_ENDS ///< Select eigenvalues half from each end of the spectrum. When
///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers.
};
///
/// \ingroup Enumerations
///
/// The enumeration of selection rules of desired eigenvalues. Alias for `SELECT_EIGENVALUE`.
///
enum SELECT_EIGENVALUE_ALIAS
{
WHICH_LM = 0, ///< Alias for `LARGEST_MAGN`
WHICH_LR, ///< Alias for `LARGEST_REAL`
WHICH_LI, ///< Alias for `LARGEST_IMAG`
WHICH_LA, ///< Alias for `LARGEST_ALGE`
WHICH_SM, ///< Alias for `SMALLEST_MAGN`
WHICH_SR, ///< Alias for `SMALLEST_REAL`
WHICH_SI, ///< Alias for `SMALLEST_IMAG`
WHICH_SA, ///< Alias for `SMALLEST_ALGE`
WHICH_BE ///< Alias for `BOTH_ENDS`
};
/// \cond
// Get the element type of a "scalar"
// ElemType<double> => double
// ElemType< std::complex<double> > => double
template <typename T>
class ElemType
{
public:
typedef T type;
};
template <typename T>
class ElemType<std::complex<T> >
{
public:
typedef T type;
};
// When comparing eigenvalues, we first calculate the "target"
// to sort. For example, if we want to choose the eigenvalues with
// largest magnitude, the target will be -abs(x).
// The minus sign is due to the fact that std::sort() sorts in ascending order.
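// Example (editor's note): for LARGEST_MAGN and the values {1.0, -3.0, 2.0}, the
// targets are {-1.0, -3.0, -2.0}; sorting them in ascending order yields the index
// sequence {1, 2, 0}, i.e. -3.0, 2.0, 1.0 ordered by decreasing magnitude.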
// Default target: throw an exception
template <typename Scalar, int SelectionRule>
class SortingTarget
{
public:
static typename ElemType<Scalar>::type get(const Scalar& val)
{
using std::abs;
throw std::invalid_argument("incompatible selection rule");
return -abs(val);
}
};
// Specialization for LARGEST_MAGN
// This covers [float, double, complex] x [LARGEST_MAGN]
template <typename Scalar>
class SortingTarget<Scalar, LARGEST_MAGN>
{
public:
static typename ElemType<Scalar>::type get(const Scalar& val)
{
using std::abs;
return -abs(val);
}
};
// Specialization for LARGEST_REAL
// This covers [complex] x [LARGEST_REAL]
template <typename RealType>
class SortingTarget<std::complex<RealType>, LARGEST_REAL>
{
public:
static RealType get(const std::complex<RealType>& val)
{
return -val.real();
}
};
// Specialization for LARGEST_IMAG
// This covers [complex] x [LARGEST_IMAG]
template <typename RealType>
class SortingTarget<std::complex<RealType>, LARGEST_IMAG>
{
public:
static RealType get(const std::complex<RealType>& val)
{
using std::abs;
return -abs(val.imag());
}
};
// Specialization for LARGEST_ALGE
// This covers [float, double] x [LARGEST_ALGE]
template <typename Scalar>
class SortingTarget<Scalar, LARGEST_ALGE>
{
public:
static Scalar get(const Scalar& val)
{
return -val;
}
};
// Here BOTH_ENDS is the same as LARGEST_ALGE, but
// we need some additional steps, which are done in
// SymEigsSolver.h => retrieve_ritzpair().
// There we move the smallest values to the proper locations.
template <typename Scalar>
class SortingTarget<Scalar, BOTH_ENDS>
{
public:
static Scalar get(const Scalar& val)
{
return -val;
}
};
// Specialization for SMALLEST_MAGN
// This covers [float, double, complex] x [SMALLEST_MAGN]
template <typename Scalar>
class SortingTarget<Scalar, SMALLEST_MAGN>
{
public:
static typename ElemType<Scalar>::type get(const Scalar& val)
{
using std::abs;
return abs(val);
}
};
// Specialization for SMALLEST_REAL
// This covers [complex] x [SMALLEST_REAL]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SMALLEST_REAL>
{
public:
static RealType get(const std::complex<RealType>& val)
{
return val.real();
}
};
// Specialization for SMALLEST_IMAG
// This covers [complex] x [SMALLEST_IMAG]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SMALLEST_IMAG>
{
public:
static RealType get(const std::complex<RealType>& val)
{
using std::abs;
return abs(val.imag());
}
};
// Specialization for SMALLEST_ALGE
// This covers [float, double] x [SMALLEST_ALGE]
template <typename Scalar>
class SortingTarget<Scalar, SMALLEST_ALGE>
{
public:
static Scalar get(const Scalar& val)
{
return val;
}
};
// Sort eigenvalues and return the order index
template <typename PairType>
class PairComparator
{
public:
bool operator()(const PairType& v1, const PairType& v2)
{
return v1.first < v2.first;
}
};
template <typename T, int SelectionRule>
class SortEigenvalue
{
private:
typedef typename ElemType<T>::type TargetType; // Type of the sorting target, will be
// a floating-point type, e.g. "double"
typedef std::pair<TargetType, int> PairType; // Type of the sorting pair, including
// the sorting target and the index
std::vector<PairType> pair_sort;
public:
SortEigenvalue(const T* start, int size) :
pair_sort(size)
{
for (int i = 0; i < size; i++)
{
pair_sort[i].first = SortingTarget<T, SelectionRule>::get(start[i]);
pair_sort[i].second = i;
}
PairComparator<PairType> comp;
std::sort(pair_sort.begin(), pair_sort.end(), comp);
}
std::vector<int> index()
{
std::vector<int> ind(pair_sort.size());
for (unsigned int i = 0; i < ind.size(); i++)
ind[i] = pair_sort[i].second;
return ind;
}
};
/// \endcond
} // namespace Spectra
#endif // SELECTION_RULE_H
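
For orientation, the internal SortEigenvalue helper above computes the permutation that orders eigenvalues according to a selection rule. A minimal usage sketch follows, assuming the SelectionRule.h definitions above are in scope; the function name and values are illustrative and not part of the vendored file:

// Hypothetical usage sketch of the internal sorting helper above.
#include <vector>
void example_sort()
{
    double evals[4] = {0.5, -3.0, 2.0, -1.0};
    // Order indices so that the largest-magnitude eigenvalues come first
    Spectra::SortEigenvalue<double, Spectra::LARGEST_MAGN> sorting(evals, 4);
    std::vector<int> order = sorting.index();   // yields {1, 2, 3, 0}
    (void)order;
}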

View File

@ -0,0 +1,91 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SIMPLE_RANDOM_H
#define SIMPLE_RANDOM_H
#include <Eigen/Core>
/// \cond
namespace Spectra {
// We need a simple pseudo random number generator here:
// 1. It is used to generate initial and restarted residual vectors.
// 2. It does not need to be especially "random" or advanced. All we hope
// is that the residual vector is not in the space spanned by the
// current Krylov subspace; this holds almost surely.
// 3. We don't want to call the C++ RNG, since we actually want the
// algorithm to be deterministic. Also, calling the C/C++ RNG is not
// allowed in R packages submitted to CRAN.
// 4. The method should be as simple as possible, so an LCG is enough.
// 5. Based on public domain code by Ray Gardner
// http://stjarnhimlen.se/snippets/rg_rand.c
template <typename Scalar = double>
class SimpleRandom
{
private:
typedef Eigen::Index Index;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
const unsigned int m_a; // multiplier
const unsigned long m_max; // 2^31 - 1
long m_rand;
inline long next_long_rand(long seed)
{
unsigned long lo, hi;
lo = m_a * (long) (seed & 0xFFFF);
hi = m_a * (long) ((unsigned long) seed >> 16);
lo += (hi & 0x7FFF) << 16;
if (lo > m_max)
{
lo &= m_max;
++lo;
}
lo += hi >> 15;
if (lo > m_max)
{
lo &= m_max;
++lo;
}
return (long) lo;
}
public:
SimpleRandom(unsigned long init_seed) :
m_a(16807),
m_max(2147483647L),
m_rand(init_seed ? (init_seed & m_max) : 1)
{}
Scalar random()
{
m_rand = next_long_rand(m_rand);
return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
}
// Vector of random numbers of type Scalar
// Ranging from -0.5 to 0.5
Vector random_vec(const Index len)
{
Vector res(len);
for (Index i = 0; i < len; i++)
{
m_rand = next_long_rand(m_rand);
res[i] = Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
}
return res;
}
};
} // namespace Spectra
/// \endcond
#endif // SIMPLE_RANDOM_H
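
The generator above is a deterministic LCG returning values in [-0.5, 0.5], used to seed (re)started residual vectors. A minimal usage sketch, assuming the SimpleRandom.h header above is in scope; names are illustrative and not part of the vendored file:

// Hypothetical usage sketch of SimpleRandom.
#include <Eigen/Core>
void example_random()
{
    Spectra::SimpleRandom<double> rng(42);      // deterministic LCG, seed 42
    double r = rng.random();                    // single draw in [-0.5, 0.5]
    Eigen::VectorXd v0 = rng.random_vec(10);    // length-10 starting residual vector
    (void)r; (void)v0;
}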

View File

@ -0,0 +1,71 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef TYPE_TRAITS_H
#define TYPE_TRAITS_H
#include <Eigen/Core>
#include <limits>
/// \cond
namespace Spectra {
// For a real value type "Scalar", we want to know its smallest
// positive value, i.e., std::numeric_limits<Scalar>::min().
// However, we must take non-standard value types into account,
// so we rely on Eigen::NumTraits.
//
// Eigen::NumTraits defines epsilon() and lowest(), but
// lowest() means negative highest(), i.e., a large negative
// value rather than the smallest positive one we want.
//
// Therefore, we manually define this limit, and use epsilon()^3
// to mimic it for non-standard types.
// Generic definition
template <typename Scalar>
struct TypeTraits
{
static inline Scalar min()
{
return Eigen::numext::pow(Eigen::NumTraits<Scalar>::epsilon(), Scalar(3));
}
};
// Full specialization
template <>
struct TypeTraits<float>
{
static inline float min()
{
return std::numeric_limits<float>::min();
}
};
template <>
struct TypeTraits<double>
{
static inline double min()
{
return std::numeric_limits<double>::min();
}
};
template <>
struct TypeTraits<long double>
{
static inline long double min()
{
return std::numeric_limits<long double>::min();
}
};
} // namespace Spectra
/// \endcond
#endif // TYPE_TRAITS_H
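
As the comment above explains, TypeTraits<Scalar>::min() is the true smallest positive value for standard floating-point types and an epsilon()^3 surrogate otherwise. A small illustrative sketch, assuming the TypeTraits.h header above is in scope; not part of the vendored file:

// Hypothetical illustration of TypeTraits.
#include <iostream>
void example_traits()
{
    // Specialized: the smallest positive normalized double, ~2.2e-308
    std::cout << Spectra::TypeTraits<double>::min() << "\n";
    // A non-standard scalar type would instead fall back to epsilon()^3
}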

View File

@ -0,0 +1,552 @@
// Written by Anna Araslanova
// Modified by Yixuan Qiu
// License: MIT
#ifndef LOBPCG_SOLVER
#define LOBPCG_SOLVER
#include <functional>
#include <map>
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/Eigenvalues>
#include <Eigen/SVD>
#include <Eigen/SparseCholesky>
#include "../SymGEigsSolver.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// *** METHOD
/// The class represents the LOBPCG algorithm, which was invented by Andrew Knyazev.
/// The theoretical background of the procedure can be found in the articles below:
/// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver: Locally optimal block preconditioned conjugate gradient method. SIAM Journal on Scientific Computing, 23(2), pp. 517-541.
/// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. Block locally optimal preconditioned eigenvalue solvers (BLOPEX) in HYPRE and PETSc. SIAM Journal on Scientific Computing, 29(5), pp. 2224-2239.
///
/// *** CONDITIONS OF USE
/// Locally Optimal Block Preconditioned Conjugate Gradient (LOBPCG) is a method for finding the M smallest eigenvalues
/// and eigenvectors of a large symmetric positive definite generalized eigenvalue problem
/// \f$Ax=\lambda Bx,\f$
/// where \f$A_{N \times N}\f$ is symmetric and \f$B\f$ is symmetric positive definite. \f$A\f$ and \f$B\f$ are also assumed to be large and sparse.
/// \f$M\f$ should satisfy \f$M \ll N\f$ (at least \f$5M < N\f$).
///
/// *** ARGUMENTS
/// Eigen::SparseMatrix<long double> A; // N*N - Ax = lambda*Bx, large and sparse
/// Eigen::SparseMatrix<long double> X; // N*M - initial approximations to eigenvectors (random in the general case)
/// Spectra::LOBPCGSolver<long double> solver(A, X);
/// *Eigen::SparseMatrix<long double> B; // N*N - Ax = lambda*Bx, sparse, positive definite
/// solver.setB(B);
/// *Eigen::SparseMatrix<long double> Y; // N*K - constraints, already found eigenvectors
/// solver.setConstraints(Y);
/// *Eigen::SparseMatrix<long double> T; // N*N - preconditioner ~ A^-1
/// solver.setPreconditioner(T);
///
/// *** OUTCOMES
/// solver.compute(); // compute eigenpairs // void
/// solver.info(); // state of convergence // int
/// solver.residuals(); // get residuals to evaluate biases // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
/// solver.eigenvalues(); // get eigenvalues // Eigen::Matrix<Scalar, Eigen::Dynamic, 1>
/// solver.eigenvectors(); // get eigenvectors // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
///
/// *** EXAMPLE
/// \code{.cpp}
/// #include <Spectra/contrib/SymSparseEigsSolverLOBPCG.h>
///
/// // random A
/// Matrix a;
/// a = (Matrix::Random(10, 10).array() > 0.6).cast<long double>() * Matrix::Random(10, 10).array() * 5;
/// a = Matrix((a).triangularView<Eigen::Lower>()) + Matrix((a).triangularView<Eigen::Lower>()).transpose();
/// for (int i = 0; i < 10; i++)
/// a(i, i) = i + 0.5;
/// std::cout << a << "\n";
/// Eigen::SparseMatrix<long double> A(a.sparseView());
/// // random X
/// Eigen::Matrix<long double, 10, 2> x;
/// x = Matrix::Random(10, 2).array();
/// Eigen::SparseMatrix<long double> X(x.sparseView());
/// // solve Ax = lambda*x
/// Spectra::LOBPCGSolver<long double> solver(A, X);
/// solver.compute(10, 1e-4); // 10 iterations, L2_tolerance = 1e-4*N
/// std::cout << "info\n" << solver.info() << std::endl;
/// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl;
/// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl;
/// std::cout << "residuals\n" << solver.residuals() << std::endl;
/// \endcode
///
template <typename Scalar = long double>
class LOBPCGSolver
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef std::complex<Scalar> Complex;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
typedef Eigen::SparseMatrix<Scalar> SparseMatrix;
typedef Eigen::SparseMatrix<Complex> SparseComplexMatrix;
const int m_n; // dimension of matrix A
const int m_nev; // number of eigenvalues requested
SparseMatrix A, X;
SparseMatrix m_Y, m_B, m_preconditioner;
bool flag_with_constraints, flag_with_B, flag_with_preconditioner;
public:
SparseMatrix m_residuals;
Matrix m_evectors;
Vector m_evalues;
int m_info;
private:
// B-orthonormalize matrix M
int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B,
SparseMatrix& true_BM, bool has_true_BM = false)
{
SparseMatrix BM;
if (has_true_BM == false)
{
if (flag_with_B)
{
BM = B * M;
}
else
{
BM = M;
}
}
else
{
BM = true_BM;
}
Eigen::SimplicialLDLT<SparseMatrix> chol_MBM(M.transpose() * BM);
if (chol_MBM.info() != SUCCESSFUL)
{
// LDLT decomposition fail
m_info = chol_MBM.info();
return chol_MBM.info();
}
SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast<Complex>();
ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast<Complex>();
D_MBM_vec = D_MBM_vec.cwiseSqrt();
for (int i = 0; i < D_MBM_vec.rows(); i++)
{
D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i);
}
SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal());
SparseComplexMatrix U_inv(Upper_MBM.rows(), Upper_MBM.cols());
U_inv.setIdentity();
Upper_MBM.template triangularView<Eigen::Upper>().solveInPlace(U_inv);
SparseComplexMatrix right_product = U_inv * D_MBM_mat;
M = M * right_product.real();
if (flag_with_B)
{
true_BM = B * M;
}
else
{
true_BM = M;
}
return SUCCESSFUL;
}
void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y,
SparseMatrix& B)
{
SparseMatrix BY;
if (flag_with_B)
{
BY = B * Y;
}
else
{
BY = Y;
}
SparseMatrix YBY = Y.transpose() * BY;
SparseMatrix BYX = BY.transpose() * X;
SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView();
X = X - Y * YBY_XYX;
}
/*
Return the block matrix
[ A  B
  C  D ]
*/
Matrix stack_4_matricies(Matrix A, Matrix B,
Matrix C, Matrix D)
{
Matrix result(A.rows() + C.rows(), A.cols() + B.cols());
result.topLeftCorner(A.rows(), A.cols()) = A;
result.topRightCorner(B.rows(), B.cols()) = B;
result.bottomLeftCorner(C.rows(), C.cols()) = C;
result.bottomRightCorner(D.rows(), D.cols()) = D;
return result;
}
Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C,
Matrix D, Matrix E, Matrix F,
Matrix G, Matrix H, Matrix I)
{
Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols());
result.block(0, 0, A.rows(), A.cols()) = A;
result.block(0, A.cols(), B.rows(), B.cols()) = B;
result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C;
result.block(A.rows(), 0, D.rows(), D.cols()) = D;
result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E;
result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F;
result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G;
result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H;
result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I;
return result;
}
void sort_epairs(Vector& evalues, Matrix& evectors, int SelectionRule)
{
std::function<bool(Scalar, Scalar)> cmp;
if (SelectionRule == SMALLEST_ALGE)
cmp = std::less<Scalar>{};
else
cmp = std::greater<Scalar>{};
std::map<Scalar, Vector, decltype(cmp)> epairs(cmp);
for (int i = 0; i < m_evectors.cols(); ++i)
epairs.insert(std::make_pair(evalues(i), evectors.col(i)));
int i = 0;
for (auto& epair : epairs)
{
evectors.col(i) = epair.second;
evalues(i) = epair.first;
i++;
}
}
void removeColumns(SparseMatrix& matrix, std::vector<int>& colToRemove)
{
// remove columns through matrix multiplication
SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size()));
int iCol = 0;
std::vector<Eigen::Triplet<Scalar>> tripletList;
tripletList.reserve(matrix.cols() - int(colToRemove.size()));
for (int iRow = 0; iRow < matrix.cols(); iRow++)
{
if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end())
{
tripletList.push_back(Eigen::Triplet<Scalar>(iRow, iCol, 1));
iCol++;
}
}
new_matrix.setFromTriplets(tripletList.begin(), tripletList.end());
matrix = matrix * new_matrix;
}
int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector<int>& columnsToDelete)
{
// column-wise L2 norms: square root of the sum of squares in each column
int BlockSize = m_nev;
Scalar sum, buffer;
for (int iCol = 0; iCol < m_nev; iCol++)
{
sum = 0;
for (int iRow = 0; iRow < m_n; iRow++)
{
buffer = m_residuals.coeff(iRow, iCol);
sum += buffer * buffer;
}
if (sqrt(sum) < tolerance_L2)
{
BlockSize--;
columnsToDelete.push_back(iCol);
}
}
return BlockSize;
}
public:
LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) :
m_n(A.rows()),
m_nev(X.cols()),
m_info(NOT_COMPUTED),
flag_with_constraints(false),
flag_with_B(false),
flag_with_preconditioner(false),
A(A),
X(X)
{
if (A.rows() != X.rows() || A.rows() != A.cols())
throw std::invalid_argument("Wrong size");
//if (m_n < 5* m_nev)
// throw std::invalid_argument("The problem size is small compared to the block size. Use standard eigensolver");
}
void setConstraints(const SparseMatrix& Y)
{
m_Y = Y;
flag_with_constraints = true;
}
void setB(const SparseMatrix& B)
{
if (B.rows() != A.rows() || B.cols() != A.cols())
throw std::invalid_argument("Wrong size");
m_B = B;
flag_with_B = true;
}
void setPreconditioner(const SparseMatrix& preconditioner)
{
m_preconditioner = preconditioner;
flag_with_preconditioner = true;
}
void compute(int maxit = 10, Scalar tol_div_n = 1e-7)
{
Scalar tolerance_L2 = tol_div_n * m_n;
int BlockSize;
int max_iter = std::min(m_n, maxit);
SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix;
Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD;
std::vector<int> columnsToDelete;
if (flag_with_constraints)
{
// Apply the constraints Y to X
applyConstraintsInPlace(X, m_Y, m_B);
}
// Make initial vectors orthonormal
// implicit BX declaration
if (orthogonalizeInPlace(X, m_B, BX) != SUCCESSFUL)
{
max_iter = 0;
}
AX = A * X;
// Solve the small (nev x nev) dense eigenvalue problem X' * A * X
// for all its eigenvalues and eigenvectors as a first approximation
Eigen::EigenSolver<Matrix> eigs(Matrix(X.transpose() * AX));
if (eigs.info() != SUCCESSFUL)
{
m_info = eigs.info();
max_iter = 0;
}
else
{
m_evalues = eigs.eigenvalues().real();
m_evectors = eigs.eigenvectors().real();
sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE);
sparse_eVecX = m_evectors.sparseView();
X = X * sparse_eVecX;
AX = AX * sparse_eVecX;
BX = BX * sparse_eVecX;
}
for (int iter_num = 0; iter_num < max_iter; iter_num++)
{
m_residuals.resize(m_n, m_nev);
for (int i = 0; i < m_nev; i++)
{
m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i);
}
BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete);
if (BlockSize == 0)
{
m_info = SUCCESSFUL;
break;
}
// shrink the active block: drop columns that have already converged
if (columnsToDelete.size() > 0)
{
removeColumns(m_residuals, columnsToDelete);
if (iter_num > 0)
{
removeColumns(directions, columnsToDelete);
removeColumns(AD, columnsToDelete);
removeColumns(BD, columnsToDelete);
}
columnsToDelete.clear(); // for next iteration
}
if (flag_with_preconditioner)
{
// Apply the preconditioner to the residuals
m_residuals = m_preconditioner * m_residuals;
}
if (flag_with_constraints)
{
// Apply the constraints Y to residuals
applyConstraintsInPlace(m_residuals, m_Y, m_B);
}
if (orthogonalizeInPlace(m_residuals, m_B, BR) != SUCCESSFUL)
{
break;
}
AR = A * m_residuals;
// Orthonormalize conjugate directions
if (iter_num > 0)
{
if (orthogonalizeInPlace(directions, m_B, BD, true) != SUCCESSFUL)
{
break;
}
AD = A * directions;
}
// Perform the Rayleigh-Ritz procedure
XAR = Matrix(X.transpose() * AR);
RAR = Matrix(m_residuals.transpose() * AR);
XBR = Matrix(X.transpose() * BR);
if (iter_num > 0)
{
XAD = X.transpose() * AD;
RAD = m_residuals.transpose() * AD;
DAD = directions.transpose() * AD;
XBD = X.transpose() * BD;
RBD = m_residuals.transpose() * BD;
gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose());
gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize));
}
else
{
gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR);
gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize));
}
//calculate the lowest/largest m eigenpairs; Solve the generalized eigenvalue problem.
DenseSymMatProd<Scalar> Aop(gramA);
DenseCholesky<Scalar> Bop(gramB);
SymGEigsSolver<Scalar, SMALLEST_ALGE, DenseSymMatProd<Scalar>,
DenseCholesky<Scalar>, GEIGS_CHOLESKY>
geigs(&Aop, &Bop, m_nev, std::min(10, int(gramA.rows()) - 1));
geigs.init();
int nconv = geigs.compute();
//Mat evecs;
if (geigs.info() == SUCCESSFUL)
{
m_evalues = geigs.eigenvalues();
m_evectors = geigs.eigenvectors();
sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE);
}
else
{
// Problem with the generalized eigensolver
m_info = geigs.info();
break;
}
// Compute Ritz vectors
if (iter_num > 0)
{
eVecX = m_evectors.block(0, 0, m_nev, m_nev);
eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev);
eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev);
sparse_eVecX = eVecX.sparseView();
sparse_eVecR = eVecR.sparseView();
sparse_eVecD = eVecD.sparseView();
DD = m_residuals * sparse_eVecR; // new conjugate directions
ADD = AR * sparse_eVecR;
BDD = BR * sparse_eVecR;
DD = DD + directions * sparse_eVecD;
ADD = ADD + AD * sparse_eVecD;
BDD = BDD + BD * sparse_eVecD;
}
else
{
eVecX = m_evectors.block(0, 0, m_nev, m_nev);
eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev);
sparse_eVecX = eVecX.sparseView();
sparse_eVecR = eVecR.sparseView();
DD = m_residuals * sparse_eVecR;
ADD = AR * sparse_eVecR;
BDD = BR * sparse_eVecR;
}
X = X * sparse_eVecX + DD;
AX = AX * sparse_eVecX + ADD;
BX = BX * sparse_eVecX + BDD;
directions = DD;
AD = ADD;
BD = BDD;
} // iteration loop
// calculate last residuals
m_residuals.resize(m_n, m_nev);
for (int i = 0; i < m_nev; i++)
{
m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i);
}
BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete);
if (BlockSize == 0)
{
m_info = SUCCESSFUL;
}
} // compute
Vector eigenvalues()
{
return m_evalues;
}
Matrix eigenvectors()
{
return m_evectors;
}
Matrix residuals()
{
return Matrix(m_residuals);
}
int info() { return m_info; }
};
} // namespace Spectra
#endif // LOBPCG_SOLVER
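
The convergence test in checkConvergence_getBlocksize() above drops every residual column whose L2 norm falls below tolerance_L2 = tol_div_n * N. The coefficient-by-coefficient loop can be read as the following equivalent sketch using Eigen column operations; this is an illustration of that reading, not the vendored implementation:

// Hypothetical equivalent of the per-column convergence test above.
#include <vector>
#include <Eigen/Core>
#include <Eigen/SparseCore>
int converged_block_size(const Eigen::SparseMatrix<double>& residuals,
                         double tolerance_L2,
                         std::vector<int>& columnsToDelete)
{
    const Eigen::MatrixXd R(residuals);          // densify for the column-wise reduction
    int block_size = int(R.cols());
    for (int j = 0; j < R.cols(); ++j)
        if (R.col(j).norm() < tolerance_L2)      // same criterion: sqrt of sum of squares
        {
            --block_size;
            columnsToDelete.push_back(j);
        }
    return block_size;
}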

View File

@ -0,0 +1,202 @@
// Copyright (C) 2018 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef PARTIAL_SVD_SOLVER_H
#define PARTIAL_SVD_SOLVER_H
#include <Eigen/Core>
#include "../SymEigsSolver.h"
namespace Spectra {
// Abstract class for matrix operation
template <typename Scalar>
class SVDMatOp
{
public:
virtual int rows() const = 0;
virtual int cols() const = 0;
// y_out = A' * A * x_in or y_out = A * A' * x_in
virtual void perform_op(const Scalar* x_in, Scalar* y_out) = 0;
virtual ~SVDMatOp() {}
};
// Operation of a tall matrix in SVD
// We compute the eigenvalues of A' * A
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar, typename MatrixType>
class SVDTallMatOp : public SVDMatOp<Scalar>
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_dim;
Vector m_cache;
public:
// Constructor
SVDTallMatOp(ConstGenericMatrix& mat) :
m_mat(mat),
m_dim(std::min(mat.rows(), mat.cols())),
m_cache(mat.rows())
{}
// These are the rows and columns of A' * A
int rows() const { return m_dim; }
int cols() const { return m_dim; }
// y_out = A' * A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.cols());
m_cache.noalias() = m_mat * x;
y.noalias() = m_mat.transpose() * m_cache;
}
};
// Operation of a wide matrix in SVD
// We compute the eigenvalues of A * A'
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar, typename MatrixType>
class SVDWideMatOp : public SVDMatOp<Scalar>
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef Eigen::Map<const Vector> MapConstVec;
typedef Eigen::Map<Vector> MapVec;
typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_dim;
Vector m_cache;
public:
// Constructor
SVDWideMatOp(ConstGenericMatrix& mat) :
m_mat(mat),
m_dim(std::min(mat.rows(), mat.cols())),
m_cache(mat.cols())
{}
// These are the rows and columns of A * A'
int rows() const { return m_dim; }
int cols() const { return m_dim; }
// y_out = A * A' * x_in
void perform_op(const Scalar* x_in, Scalar* y_out)
{
MapConstVec x(x_in, m_mat.rows());
MapVec y(y_out, m_mat.rows());
m_cache.noalias() = m_mat.transpose() * x;
y.noalias() = m_mat * m_cache;
}
};
// Partial SVD solver
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar = double,
typename MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> >
class PartialSVDSolver
{
private:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;
ConstGenericMatrix m_mat;
const int m_m;
const int m_n;
SVDMatOp<Scalar>* m_op;
SymEigsSolver<Scalar, LARGEST_ALGE, SVDMatOp<Scalar> >* m_eigs;
int m_nconv;
Matrix m_evecs;
public:
// Constructor
PartialSVDSolver(ConstGenericMatrix& mat, int ncomp, int ncv) :
m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0)
{
// Determine the matrix type, tall or wide
if (m_m > m_n)
{
m_op = new SVDTallMatOp<Scalar, MatrixType>(mat);
}
else
{
m_op = new SVDWideMatOp<Scalar, MatrixType>(mat);
}
// Solver object
m_eigs = new SymEigsSolver<Scalar, LARGEST_ALGE, SVDMatOp<Scalar> >(m_op, ncomp, ncv);
}
// Destructor
virtual ~PartialSVDSolver()
{
delete m_eigs;
delete m_op;
}
// Computation
int compute(int maxit = 1000, Scalar tol = 1e-10)
{
m_eigs->init();
m_nconv = m_eigs->compute(maxit, tol);
return m_nconv;
}
// The converged singular values
Vector singular_values() const
{
Vector svals = m_eigs->eigenvalues().cwiseSqrt();
return svals;
}
// The converged left singular vectors
Matrix matrix_U(int nu)
{
if (m_evecs.cols() < 1)
{
m_evecs = m_eigs->eigenvectors();
}
nu = std::min(nu, m_nconv);
if (m_m <= m_n)
{
return m_evecs.leftCols(nu);
}
return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix();
}
// The converged right singular vectors
Matrix matrix_V(int nv)
{
if (m_evecs.cols() < 1)
{
m_evecs = m_eigs->eigenvectors();
}
nv = std::min(nv, m_nconv);
if (m_m > m_n)
{
return m_evecs.leftCols(nv);
}
return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix();
}
};
} // namespace Spectra
#endif // PARTIAL_SVD_SOLVER_H
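
PartialSVDSolver above reduces the partial SVD to a symmetric eigenproblem on A' * A (tall A) or A * A' (wide A), so the singular values are square roots of the converged eigenvalues. A hedged usage sketch, assuming the header above and the Spectra headers it includes are in scope; matrix sizes and variable names are chosen for illustration only:

// Hypothetical usage sketch of PartialSVDSolver.
#include <Eigen/Core>
void example_partial_svd()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 20);
    // Request the 3 largest singular triplets, using 10 Lanczos vectors
    Spectra::PartialSVDSolver<double> svds(A, 3, 10);
    int nconv = svds.compute();                   // default: maxit = 1000, tol = 1e-10
    Eigen::VectorXd s = svds.singular_values();   // sqrt of the converged eigenvalues
    Eigen::MatrixXd U = svds.matrix_U(3);         // 100 x 3 left singular vectors
    Eigen::MatrixXd V = svds.matrix_V(3);         // 20 x 3 right singular vectors
    (void)nconv; (void)s; (void)U; (void)V;
}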

View File

@ -14,7 +14,6 @@ set (gtsam_subdirs
sam
sfm
slam
smart
navigation
)
@ -137,6 +136,8 @@ target_include_directories(gtsam BEFORE PUBLIC
# SuiteSparse_config
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SuiteSparse_config>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/SuiteSparse_config>
# Spectra
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/Spectra>
# CCOLAMD
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>

View File

@ -55,11 +55,6 @@ public:
/// Merge the sets containing i1 and i2. Does nothing if i1 and i2 are already in the same set.
void merge(const size_t& i1, const size_t& i2);
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
inline size_t findSet(size_t key) const {return find(key);}
inline void makeUnionInPlace(const size_t& i1, const size_t& i2) {return merge(i1,i2);}
#endif
};
/**

View File

@ -30,6 +30,14 @@
#include <iostream>
#include <typeinfo> // operator typeid
#ifdef _WIN32
#define GENERICVALUE_VISIBILITY
#else
// This will trigger a LNKxxxx on MSVC, so disable for MSVC build
// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md
#define GENERICVALUE_VISIBILITY GTSAM_EXPORT
#endif
namespace gtsam {
/**
@ -84,7 +92,7 @@ public:
/// Virtual print function, uses traits
void print(const std::string& str) const override {
std::cout << "(" << demangle(typeid(T).name()) << ") ";
std::cout << "(" << demangle(typeid(T).name()) << ")\n";
traits<T>::Print(value_, str);
}
@ -198,4 +206,12 @@ const ValueType& Value::cast() const {
return dynamic_cast<const GenericValue<ValueType>&>(*this).value();
}
/** Functional constructor of GenericValue<T> so T can be automatically deduced
*/
template<class T>
GenericValue<T> genericValue(const T& v) {
return GenericValue<T>(v);
}
} /* namespace gtsam */
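The genericValue() helper added above lets the template argument be deduced at the call site instead of spelling out GenericValue<T>. A minimal, hypothetical sketch of that use; the types are chosen for illustration only:

// Hypothetical usage of the genericValue() helper.
#include <gtsam/base/GenericValue.h>
#include <gtsam/geometry/Point3.h>
void example_generic_value()
{
    gtsam::Point3 p(1.0, 2.0, 3.0);
    // T = Point3 is deduced automatically instead of writing GenericValue<Point3>(p)
    auto v = gtsam::genericValue(p);
    v.print("point value");
}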

View File

@ -167,62 +167,6 @@ struct FixedDimension {
BOOST_STATIC_ASSERT_MSG(value != Eigen::Dynamic,
"FixedDimension instantiated for dymanically-sized type.");
};
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// Helper class to construct the product manifold of two other manifolds, M1 and M2
/// Deprecated because of limited usefulness, maximum obfuscation
template<typename M1, typename M2>
class ProductManifold: public std::pair<M1, M2> {
BOOST_CONCEPT_ASSERT((IsManifold<M1>));
BOOST_CONCEPT_ASSERT((IsManifold<M2>));
protected:
enum { dimension1 = traits<M1>::dimension };
enum { dimension2 = traits<M2>::dimension };
public:
enum { dimension = dimension1 + dimension2 };
inline static size_t Dim() { return dimension;}
inline size_t dim() const { return dimension;}
typedef Eigen::Matrix<double, dimension, 1> TangentVector;
typedef OptionalJacobian<dimension, dimension> ChartJacobian;
/// Default constructor needs default constructors to be defined
ProductManifold():std::pair<M1,M2>(M1(),M2()) {}
// Construct from two original manifold values
ProductManifold(const M1& m1, const M2& m2):std::pair<M1,M2>(m1,m2) {}
/// Retract delta to manifold
ProductManifold retract(const TangentVector& xi) const {
M1 m1 = traits<M1>::Retract(this->first, xi.template head<dimension1>());
M2 m2 = traits<M2>::Retract(this->second, xi.template tail<dimension2>());
return ProductManifold(m1,m2);
}
/// Compute the coordinates in the tangent space
TangentVector localCoordinates(const ProductManifold& other) const {
typename traits<M1>::TangentVector v1 = traits<M1>::Local(this->first, other.first);
typename traits<M2>::TangentVector v2 = traits<M2>::Local(this->second, other.second);
TangentVector v;
v << v1, v2;
return v;
}
// Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html
enum { NeedsToAlign = (sizeof(M1) % 16) == 0 || (sizeof(M2) % 16) == 0
};
public:
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
};
// Define any direct product group to be a model of the multiplicative Group concept
template<typename M1, typename M2>
struct traits<ProductManifold<M1, M2> > : internal::Manifold<ProductManifold<M1, M2> > {
};
#endif
} // \ namespace gtsam
///**

View File

@ -136,20 +136,24 @@ Vector operator^(const Matrix& A, const Vector & v) {
return A.transpose() * v;
}
const Eigen::IOFormat& matlabFormat() {
static const Eigen::IOFormat matlab(
Eigen::StreamPrecision, // precision
Eigen::DontAlignCols, // flags set such that rowSpacers are not added
", ", // coeffSeparator
";\n", // rowSeparator
"\t", // rowPrefix
"", // rowSuffix
"[\n", // matPrefix
"\n]" // matSuffix
);
return matlab;
}
/* ************************************************************************* */
//3 argument call
void print(const Matrix& A, const string &s, ostream& stream) {
static const Eigen::IOFormat matlab(
Eigen::StreamPrecision, // precision
0, // flags
", ", // coeffSeparator
";\n", // rowSeparator
"\t", // rowPrefix
"", // rowSuffix
"[\n", // matPrefix
"\n]" // matSuffix
);
cout << s << A.format(matlab) << endl;
cout << s << A.format(matlabFormat()) << endl;
}
/* ************************************************************************* */

View File

@ -23,14 +23,11 @@
// \callgraph
#pragma once
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/base/Vector.h>
#include <gtsam/config.h>
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <Eigen/LU>
#endif
#include <boost/format.hpp>
#include <boost/function.hpp>
#include <boost/tuple/tuple.hpp>
@ -76,6 +73,10 @@ GTSAM_MAKE_MATRIX_DEFS(9);
typedef Eigen::Block<Matrix> SubMatrix;
typedef Eigen::Block<const Matrix> ConstSubMatrix;
// Matrix formatting arguments when printing.
// Akin to Matlab style.
const Eigen::IOFormat& matlabFormat();
/**
* equals with a tolerance
*/
@ -517,23 +518,6 @@ struct MultiplyWithInverseFunction {
const Operator phi_;
};
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
inline Matrix zeros( size_t m, size_t n ) { return Matrix::Zero(m,n); }
inline Matrix ones( size_t m, size_t n ) { return Matrix::Ones(m,n); }
inline Matrix eye( size_t m, size_t n) { return Matrix::Identity(m, n); }
inline Matrix eye( size_t m ) { return eye(m,m); }
inline Matrix diag(const Vector& v) { return v.asDiagonal(); }
inline void multiplyAdd(double alpha, const Matrix& A, const Vector& x, Vector& e) { e += alpha * A * x; }
inline void multiplyAdd(const Matrix& A, const Vector& x, Vector& e) { e += A * x; }
inline void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, Vector& x) { x += alpha * A.transpose() * e; }
inline void transposeMultiplyAdd(const Matrix& A, const Vector& e, Vector& x) { x += A.transpose() * e; }
inline void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, SubVector x) { x += alpha * A.transpose() * e; }
inline void insertColumn(Matrix& A, const Vector& col, size_t j) { A.col(j) = col; }
inline void insertColumn(Matrix& A, const Vector& col, size_t i, size_t j) { A.col(j).segment(i, col.size()) = col; }
inline void solve(Matrix& A, Matrix& B) { B = A.fullPivLu().solve(B); }
inline Matrix inverse(const Matrix& A) { return A.inverse(); }
#endif
GTSAM_EXPORT Matrix LLt(const Matrix& A);
GTSAM_EXPORT Matrix RtR(const Matrix& A);

View File

@ -256,26 +256,6 @@ GTSAM_EXPORT Vector concatVectors(const std::list<Vector>& vs);
* concatenate Vectors
*/
GTSAM_EXPORT Vector concatVectors(size_t nrVectors, ...);
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
inline Vector abs(const Vector& v){return v.cwiseAbs();}
inline Vector basis(size_t n, size_t i) { return Vector::Unit(n,i); }
inline Vector delta(size_t n, size_t i, double value){ return Vector::Unit(n, i) * value;}
inline size_t dim(const Vector& v) { return v.size(); }
inline Vector ediv(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseQuotient(b);}
inline Vector esqrt(const Vector& v) { return v.cwiseSqrt();}
inline Vector emul(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseProduct(b);}
inline double max(const Vector &a){return a.maxCoeff();}
inline double norm_2(const Vector& v) {return v.norm();}
inline Vector ones(size_t n) { return Vector::Ones(n); }
inline Vector reciprocal(const Vector &a) {return a.array().inverse();}
inline Vector repeat(size_t n, double value) {return Vector::Constant(n, value);}
inline const Vector sub(const Vector &v, size_t i1, size_t i2) {return v.segment(i1,i2-i1);}
inline void subInsert(Vector& fullVector, const Vector& subVector, size_t i) {fullVector.segment(i, subVector.size()) = subVector;}
inline double sum(const Vector &a){return a.sum();}
inline bool zero(const Vector& v){ return v.isZero(); }
inline Vector zero(size_t n) { return Vector::Zero(n); }
#endif
} // namespace gtsam
#include <boost/serialization/nvp.hpp>

View File

@ -89,7 +89,7 @@ void TimingOutline::print(const std::string& outline) const {
childOrder[child.second->myOrder_] = child.second;
}
// Print children
for(const ChildOrder::value_type order_child: childOrder) {
for(const ChildOrder::value_type& order_child: childOrder) {
std::string childOutline(outline);
childOutline += "| ";
order_child.second->print(childOutline);

View File

@ -70,7 +70,7 @@
#cmakedefine GTSAM_THROW_CHEIRALITY_EXCEPTION
// Make sure dependent projects that want it can see deprecated functions
#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V4
#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V41
// Publish flag about Eigen typedef
#cmakedefine GTSAM_TYPEDEF_POINTS_TO_VECTORS

View File

@ -69,7 +69,7 @@ namespace gtsam {
for(Key j: f.keys()) cs[j] = f.cardinality(j);
// Convert map into keys
DiscreteKeys keys;
for(const DiscreteKey& key: cs)
for(const std::pair<const Key,size_t>& key: cs)
keys.push_back(key);
// apply operand
ADT result = ADT::apply(f, op);

View File

@ -45,6 +45,7 @@ class GTSAM_EXPORT DiscreteBayesTreeClique
typedef boost::shared_ptr<This> shared_ptr;
typedef boost::weak_ptr<This> weak_ptr;
DiscreteBayesTreeClique() {}
virtual ~DiscreteBayesTreeClique() {}
DiscreteBayesTreeClique(
const boost::shared_ptr<DiscreteConditional>& conditional)
: Base(conditional) {}

View File

@ -56,7 +56,7 @@ bool Potentials::equals(const Potentials& other, double tol) const {
/* ************************************************************************* */
void Potentials::print(const string& s, const KeyFormatter& formatter) const {
cout << s << "\n Cardinalities: {";
for (const DiscreteKey& key : cardinalities_)
for (const std::pair<const Key,size_t>& key : cardinalities_)
cout << formatter(key.first) << ":" << key.second << ", ";
cout << "}" << endl;
ADT::print(" ");

View File

@ -176,17 +176,6 @@ class EssentialMatrix {
/// @}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point3 transform_to(const Point3& p,
OptionalJacobian<3, 5> DE = boost::none,
OptionalJacobian<3, 3> Dpoint = boost::none) const {
return transformTo(p, DE, Dpoint);
};
/// @}
#endif
private:
/// @name Advanced Interface
/// @{

View File

@ -79,18 +79,6 @@ ostream &operator<<(ostream &os, const Point2& p) {
return os;
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
boost::optional<Point2> CircleCircleIntersection(double R_d, double r_d, double tol) {
return circleCircleIntersection(R_d, r_d, tol);
}
std::list<Point2> CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2> fh) {
return circleCircleIntersection(c1, c2, fh);
}
std::list<Point2> CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol) {
return circleCircleIntersection(c1, r1, c2, r2, tol);
}
#endif
#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS
/* ************************************************************************* */

View File

@ -46,12 +46,7 @@ public:
/// @{
/// default constructor
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
// Deprecated default constructor initializes to zero, in contrast to new behavior below
Point2() { setZero(); }
#else
Point2() {}
#endif
Point2() {}
using Vector2::Vector2;
@ -113,25 +108,7 @@ public:
/// Streaming
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point2& p);
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point2 inverse() const { return -(*this); }
Point2 compose(const Point2& q) const { return (*this)+q;}
Point2 between(const Point2& q) const { return q-(*this);}
Vector2 localCoordinates(const Point2& q) const { return between(q);}
Point2 retract(const Vector2& v) const { return compose(Point2(v));}
static Vector2 Logmap(const Point2& p) { return p;}
static Point2 Expmap(const Vector2& v) { return Point2(v);}
inline double dist(const Point2& p2) const {return distance(p2);}
GTSAM_EXPORT static boost::optional<Point2> CircleCircleIntersection(double R_d, double r_d, double tol = 1e-9);
GTSAM_EXPORT static std::list<Point2> CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2> fh);
GTSAM_EXPORT static std::list<Point2> CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol = 1e-9);
/// @}
#endif
private:
private:
/// @name Advanced Interface
/// @{

View File

@ -62,23 +62,6 @@ ostream &operator<<(ostream &os, const Point3& p) {
return os;
}
/* ************************************************************************* */
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
Point3 Point3::add(const Point3 &q, OptionalJacobian<3,3> H1,
OptionalJacobian<3,3> H2) const {
if (H1) *H1 = I_3x3;
if (H2) *H2 = I_3x3;
return *this + q;
}
Point3 Point3::sub(const Point3 &q, OptionalJacobian<3,3> H1,
OptionalJacobian<3,3> H2) const {
if (H1) *H1 = I_3x3;
if (H2) *H2 = -I_3x3;
return *this - q;
}
#endif
#endif
/* ************************************************************************* */
double distance3(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1,

View File

@ -51,11 +51,6 @@ class Point3 : public Vector3 {
/// @name Standard Constructors
/// @{
// Deprecated default constructor initializes to zero, in contrast to new behavior below
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
Point3() { setZero(); }
#endif
using Vector3::Vector3;
/// @}
@ -118,27 +113,7 @@ class Point3 : public Vector3 {
/// Output stream operator
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point3& p);
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point3 inverse() const { return -(*this);}
Point3 compose(const Point3& q) const { return (*this)+q;}
Point3 between(const Point3& q) const { return q-(*this);}
Vector3 localCoordinates(const Point3& q) const { return between(q);}
Point3 retract(const Vector3& v) const { return compose(Point3(v));}
static Vector3 Logmap(const Point3& p) { return p;}
static Point3 Expmap(const Vector3& v) { return Point3(v);}
inline double dist(const Point3& q) const { return (q - *this).norm(); }
Point3 normalize(OptionalJacobian<3, 3> H = boost::none) const { return normalized(H);}
GTSAM_EXPORT Point3 add(const Point3& q, OptionalJacobian<3, 3> H1 = boost::none,
OptionalJacobian<3, 3> H2 = boost::none) const;
GTSAM_EXPORT Point3 sub(const Point3& q, OptionalJacobian<3, 3> H1 = boost::none,
OptionalJacobian<3, 3> H2 = boost::none) const;
/// @}
#endif
private:
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>

View File

@ -322,10 +322,10 @@ boost::optional<Pose2> align(const vector<Point2Pair>& pairs) {
// calculate cos and sin
double c=0,s=0;
for(const Point2Pair& pair: pairs) {
Point2 dq = pair.first - cp;
Point2 dp = pair.second - cq;
c += dp.x() * dq.x() + dp.y() * dq.y();
s += dp.y() * dq.x() - dp.x() * dq.y(); // this works but is negative from formula above !! :-(
Point2 dp = pair.first - cp;
Point2 dq = pair.second - cq;
c += dp.x() * dq.x() + dp.y() * dq.y();
s += -dp.y() * dq.x() + dp.x() * dq.y();
}
// calculate angle and translation

View File

@ -289,22 +289,6 @@ public:
/// @}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point2 transform_from(const Point2& point,
OptionalJacobian<2, 3> Dpose = boost::none,
OptionalJacobian<2, 2> Dpoint = boost::none) const {
return transformFrom(point, Dpose, Dpoint);
}
Point2 transform_to(const Point2& point,
OptionalJacobian<2, 3> Dpose = boost::none,
OptionalJacobian<2, 2> Dpoint = boost::none) const {
return transformTo(point, Dpose, Dpoint);
}
/// @}
#endif
private:
// Serialization function

View File

@ -106,9 +106,7 @@ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
/* ************************************************************************* */
void Pose3::print(const string& s) const {
cout << s;
R_.print("R:\n");
cout << t_ << ";" << endl;
cout << (s.empty() ? s : s + " ") << *this << endl;
}
/* ************************************************************************* */
@ -292,15 +290,6 @@ Pose3 Pose3::transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself,
return wTa.compose(aTb, Hself, HaTb);
}
/* ************************************************************************* */
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
Pose3 Pose3::transform_to(const Pose3& pose) const {
Rot3 cRv = R_ * Rot3(pose.R_.inverse());
Point3 t = pose.transform_to(t_);
return Pose3(cRv, t);
}
#endif
/* ************************************************************************* */
Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself,
OptionalJacobian<6, 6> HwTb) const {
@ -439,9 +428,9 @@ boost::optional<Pose3> align(const vector<Point3Pair>& baPointPairs) {
/* ************************************************************************* */
std::ostream &operator<<(std::ostream &os, const Pose3& pose) {
os << pose.rotation() << "\n";
const Point3& t = pose.translation();
os << '[' << t.x() << ", " << t.y() << ", " << t.z() << "]\';\n";
// Both Rot3 and Point3 have ostream definitions so we use them.
os << "R: " << pose.rotation() << "\n";
os << "t: " << pose.translation();
return os;
}

View File

@ -326,30 +326,6 @@ public:
GTSAM_EXPORT
friend std::ostream &operator<<(std::ostream &os, const Pose3& p);
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point3 transform_from(const Point3& point,
OptionalJacobian<3, 6> Hself = boost::none,
OptionalJacobian<3, 3> Hpoint = boost::none) const {
return transformFrom(point, Hself, Hpoint);
}
Point3 transform_to(const Point3& point,
OptionalJacobian<3, 6> Hself = boost::none,
OptionalJacobian<3, 3> Hpoint = boost::none) const {
return transformTo(point, Hself, Hpoint);
}
Pose3 transform_pose_to(const Pose3& pose,
OptionalJacobian<6, 6> Hself = boost::none,
OptionalJacobian<6, 6> Hpose = boost::none) const {
return transformPoseTo(pose, Hself, Hpose);
}
/**
* @deprecated: this function is neither here nor there. */
Pose3 transform_to(const Pose3& pose) const;
/// @}
#endif
private:
/** Serialization function */
friend class boost::serialization::access;
@ -380,11 +356,6 @@ inline Matrix wedge<Pose3>(const Vector& xi) {
return Pose3::wedge(xi(0), xi(1), xi(2), xi(3), xi(4), xi(5));
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
// deprecated: use Pose3::Align with point pairs ordered the opposite way
GTSAM_EXPORT boost::optional<Pose3> align(const std::vector<Point3Pair>& baPointPairs);
#endif
// For MATLAB wrapper
typedef std::vector<Pose3> Pose3Vector;

View File

@ -39,6 +39,13 @@ Rot2 Rot2::atan2(double y, double x) {
return R.normalize();
}
/* ************************************************************************* */
Rot2 Rot2::Random(std::mt19937& rng) {
uniform_real_distribution<double> randomAngle(-M_PI, M_PI);
double angle = randomAngle(rng);
return fromAngle(angle);
}
/* ************************************************************************* */
void Rot2::print(const string& s) const {
cout << s << ": " << theta() << endl;

View File

@ -22,6 +22,8 @@
#include <gtsam/base/Lie.h>
#include <boost/optional.hpp>
#include <random>
namespace gtsam {
/**
@ -79,6 +81,14 @@ namespace gtsam {
/** Named constructor that behaves as atan2, i.e., y,x order (!) and normalizes */
static Rot2 atan2(double y, double x);
/**
* Random, generates a random angle \in [-pi,pi]
* Example:
* std::mt19937 engine(42);
* Rot2 r = Rot2::Random(engine);
*/
static Rot2 Random(std::mt19937 & rng);
/// @}
/// @name Testable
/// @{

View File

@ -32,7 +32,8 @@ namespace gtsam {
/* ************************************************************************* */
void Rot3::print(const std::string& s) const {
gtsam::print((Matrix)matrix(), s);
cout << (s.empty() ? "R: " : s + " ");
gtsam::print(static_cast<Matrix>(matrix()));
}
/* ************************************************************************* */
@ -222,10 +223,7 @@ pair<Matrix3, Vector3> RQ(const Matrix3& A) {
/* ************************************************************************* */
ostream &operator<<(ostream &os, const Rot3& R) {
os << "\n";
os << '|' << R.r1().x() << ", " << R.r2().x() << ", " << R.r3().x() << "|\n";
os << '|' << R.r1().y() << ", " << R.r2().y() << ", " << R.r3().y() << "|\n";
os << '|' << R.r1().z() << ", " << R.r2().z() << ", " << R.r3().z() << "|\n";
os << R.matrix().format(matlabFormat());
return os;
}

View File

@ -254,7 +254,7 @@ namespace gtsam {
/// @{
/** print */
void print(const std::string& s="R") const;
void print(const std::string& s="") const;
/** equals with an tolerance */
bool equals(const Rot3& p, double tol = 1e-9) const;
@ -500,23 +500,6 @@ namespace gtsam {
/// @}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
static Rot3 rodriguez(const Point3& axis, double angle) { return AxisAngle(axis, angle); }
static Rot3 rodriguez(const Unit3& axis, double angle) { return AxisAngle(axis, angle); }
static Rot3 rodriguez(const Vector3& w) { return Rodrigues(w); }
static Rot3 rodriguez(double wx, double wy, double wz) { return Rodrigues(wx, wy, wz); }
static Rot3 yaw (double t) { return Yaw(t); }
static Rot3 pitch(double t) { return Pitch(t); }
static Rot3 roll (double t) { return Roll(t); }
static Rot3 ypr(double y, double p, double r) { return Ypr(r,p,y);}
static Rot3 quaternion(double w, double x, double y, double z) {
return Rot3::Quaternion(w, x, y, z);
}
/// @}
#endif
private:
/** Serialization function */
friend class boost::serialization::access;

View File

@ -26,13 +26,13 @@ using namespace std;
namespace gtsam {
// Implementation for N>5 just uses dynamic version
// Implementation for N>=5 just uses dynamic version
template <int N>
typename SO<N>::MatrixNN SO<N>::Hat(const TangentVector& xi) {
return SOn::Hat(xi);
}
// Implementation for N>5 just uses dynamic version
// Implementation for N>=5 just uses dynamic version
template <int N>
typename SO<N>::TangentVector SO<N>::Vee(const MatrixNN& X) {
return SOn::Vee(X);
@ -60,8 +60,9 @@ typename SO<N>::TangentVector SO<N>::ChartAtOrigin::Local(const SO& R,
template <int N>
typename SO<N>::MatrixDD SO<N>::AdjointMap() const {
if (N==2) return I_1x1; // SO(2) case
throw std::runtime_error(
"SO<N>::AdjointMap only implemented for SO3 and SO4.");
"SO<N>::AdjointMap only implemented for SO2, SO3 and SO4.");
}
template <int N>
@ -84,30 +85,22 @@ typename SO<N>::MatrixDD SO<N>::LogmapDerivative(const TangentVector& omega) {
throw std::runtime_error("O<N>::LogmapDerivative only implemented for SO3.");
}
// Default fixed size version (but specialized elsewhere for N=2,3,4)
template <int N>
typename SO<N>::VectorN2 SO<N>::vec(
OptionalJacobian<internal::NSquaredSO(N), dimension> H) const {
const size_t n = rows();
const size_t n2 = n * n;
// Vectorize
VectorN2 X(n2);
X << Eigen::Map<const Matrix>(matrix_.data(), n2, 1);
VectorN2 X = Eigen::Map<const VectorN2>(matrix_.data());
// If requested, calculate H as (I \oplus Q) * P,
// where Q is the N*N rotation matrix, and P is calculated below.
if (H) {
// Calculate P matrix of vectorized generators
// TODO(duy): Should we refactor this as the jacobian of Hat?
const size_t d = dim();
Matrix P(n2, d);
for (size_t j = 0; j < d; j++) {
const auto X = Hat(Eigen::VectorXd::Unit(d, j));
P.col(j) = Eigen::Map<const Matrix>(X.data(), n2, 1);
}
H->resize(n2, d);
for (size_t i = 0; i < n; i++) {
H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d);
Matrix P = SO<N>::VectorizedGenerators();
for (size_t i = 0; i < N; i++) {
H->block(i * N, 0, N, dimension) =
matrix_ * P.block(i * N, 0, N, dimension);
}
}
return X;

View File

@ -22,21 +22,18 @@
namespace gtsam {
template <>
GTSAM_EXPORT
Matrix SOn::Hat(const Vector& xi) {
GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref<Matrix> X) {
size_t n = AmbientDim(xi.size());
if (n < 2) throw std::invalid_argument("SO<N>::Hat: n<2 not supported");
Matrix X(n, n); // allocate space for n*n skew-symmetric matrix
X.setZero();
if (n == 2) {
if (n < 2)
throw std::invalid_argument("SO<N>::Hat: n<2 not supported");
else if (n == 2) {
// Handle SO(2) case as recursion bottom
assert(xi.size() == 1);
X << 0, -xi(0), xi(0), 0;
} else {
// Recursively call SO(n-1) call for top-left block
const size_t dmin = (n - 1) * (n - 2) / 2;
X.topLeftCorner(n - 1, n - 1) = Hat(xi.tail(dmin));
Hat(xi.tail(dmin), X.topLeftCorner(n - 1, n - 1));
// determine sign of last element (signs alternate)
double sign = pow(-1.0, xi.size());
@ -47,7 +44,14 @@ Matrix SOn::Hat(const Vector& xi) {
X(j, n - 1) = -X(n - 1, j);
sign = -sign;
}
X(n - 1, n - 1) = 0; // bottom-right
}
}
template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) {
size_t n = AmbientDim(xi.size());
Matrix X(n, n); // allocate space for n*n skew-symmetric matrix
SOn::Hat(xi, X);
return X;
}
@ -99,4 +103,27 @@ SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
return result;
}
// Dynamic version of vec
template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const {
const size_t n = rows(), n2 = n * n;
// Vectorize
VectorN2 X(n2);
X << Eigen::Map<const Matrix>(matrix_.data(), n2, 1);
// If requested, calculate H as (I \oplus Q) * P,
// where Q is the N*N rotation matrix, and P is calculated below.
if (H) {
// Calculate P matrix of vectorized generators
// TODO(duy): Should we refactor this as the jacobian of Hat?
Matrix P = SOn::VectorizedGenerators(n);
const size_t d = dim();
H->resize(n2, d);
for (size_t i = 0; i < n; i++) {
H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d);
}
}
return X;
}
} // namespace gtsam
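
The dynamic vec() specialization above relies on the vectorization identity behind the comment "calculate H as (I \oplus Q) * P". A brief sketch of the reasoning, in notation of my own choosing (illustration only):

\[
\left.\frac{\partial}{\partial \xi_j}\,\mathrm{vec}\big(Q\,\exp(\hat{\xi})\big)\right|_{\xi=0}
  = \mathrm{vec}(Q\,\hat{G}_j)
  = (I_n \otimes Q)\,\mathrm{vec}(\hat{G}_j),
\]

so stacking the columns \(\mathrm{vec}(\hat{G}_j)\) gives \(P = \mathtt{VectorizedGenerators}(n)\) and \(H = (I_n \otimes Q)\,P\), which is exactly the block-wise multiplication by the rotation matrix performed in the loop.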

View File

@ -98,8 +98,8 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
template <typename Derived, int N_ = N, typename = IsDynamic<N_>>
static SO Lift(size_t n, const Eigen::MatrixBase<Derived> &R) {
Matrix Q = Matrix::Identity(n, n);
size_t p = R.rows();
assert(p < n && R.cols() == p);
const int p = R.rows();
assert(p >= 0 && p <= static_cast<int>(n) && R.cols() == p);
Q.topLeftCorner(p, p) = R;
return SO(Q);
}
@ -208,7 +208,7 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
// Calculate run-time dimensionality of manifold.
// Available as dimension or Dim() for fixed N.
size_t dim() const { return Dimension(matrix_.rows()); }
size_t dim() const { return Dimension(static_cast<size_t>(matrix_.rows())); }
/**
* Hat operator creates Lie algebra element corresponding to d-vector, where d
@ -227,9 +227,10 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
*/
static MatrixNN Hat(const TangentVector& xi);
/**
* Inverse of Hat. See note about xi element order in Hat.
*/
/// In-place version of Hat (see details there), implements recursion.
static void Hat(const Vector &xi, Eigen::Ref<MatrixNN> X);
/// Inverse of Hat. See note about xi element order in Hat.
static TangentVector Vee(const MatrixNN& X);
// Chart at origin
@ -290,7 +291,34 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
* */
VectorN2 vec(OptionalJacobian<internal::NSquaredSO(N), dimension> H =
boost::none) const;
/// @}
/// Calculate N^2 x dim matrix of vectorized Lie algebra generators for SO(N)
template <int N_ = N, typename = IsFixed<N_>>
static Matrix VectorizedGenerators() {
constexpr size_t N2 = static_cast<size_t>(N * N);
Eigen::Matrix<double, N2, dimension> G;
for (size_t j = 0; j < dimension; j++) {
const auto X = Hat(Vector::Unit(dimension, j));
G.col(j) = Eigen::Map<const VectorN2>(X.data());
}
return G;
}
/// Calculate n^2 x dim matrix of vectorized Lie algebra generators for SO(n)
template <int N_ = N, typename = IsDynamic<N_>>
static Matrix VectorizedGenerators(size_t n = 0) {
const size_t n2 = n * n, dim = Dimension(n);
Matrix G(n2, dim);
for (size_t j = 0; j < dim; j++) {
const auto X = Hat(Vector::Unit(dim, j));
G.col(j) = Eigen::Map<const Matrix>(X.data(), n2, 1);
}
return G;
}
/// @{
/// @name Serialization
/// @{
template <class Archive>
friend void save(Archive&, SO&, const unsigned int);
@ -300,6 +328,8 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
friend void serialize(Archive&, SO&, const unsigned int);
friend class boost::serialization::access;
friend class Rot3; // for serialize
/// @}
};
using SOn = SO<Eigen::Dynamic>;
@ -333,6 +363,11 @@ template <>
SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
DynamicJacobian H2) const;
/*
* Specialize dynamic vec.
*/
template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const;
/** Serialization function */
template<class Archive>
void serialize(

View File

@ -862,7 +862,15 @@ TEST( Pose3, stream)
Pose3 T;
std::ostringstream os;
os << T;
EXPECT(os.str() == "\n|1, 0, 0|\n|0, 1, 0|\n|0, 0, 1|\n\n[0, 0, 0]';\n");
string expected;
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0\n0\n0";;
#else
expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: [0, 0, 0]'";
#endif
EXPECT(os.str() == expected);
}
//******************************************************************************
@ -1032,19 +1040,22 @@ TEST(Pose3, print) {
std::stringstream expected;
Point3 translation(1, 2, 3);
// Add expected rotation
expected << "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\n";
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
expected << "1\n"
expected << "t: 1\n"
"2\n"
"3;\n";
"3\n";
#else
expected << '[' << translation.x() << ", " << translation.y() << ", " << translation.z() << "]\';";
expected << "t: [" << translation.x() << ", " << translation.y() << ", " << translation.z() << "]'\n";
#endif
// reset cout to the original stream
std::cout.rdbuf(oldbuf);
// Get substring corresponding to translation part
std::string actual = redirectStream.str().substr(38, 11);
std::string actual = redirectStream.str();
CHECK_EQUAL(expected.str(), actual);
}

View File

@ -608,7 +608,8 @@ TEST( Rot3, stream)
Rot3 R;
std::ostringstream os;
os << R;
EXPECT(os.str() == "\n|1, 0, 0|\n|0, 1, 0|\n|0, 0, 1|\n");
string expected = "[\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]";
EXPECT(os.str() == expected);
}
/* ************************************************************************* */

View File

@ -39,8 +39,8 @@ using namespace std;
using namespace gtsam;
//******************************************************************************
// Test dhynamic with n=0
TEST(SOn, SO0) {
// Test dynamic with n=0
TEST(SOn, SOn0) {
const auto R = SOn(0);
EXPECT_LONGS_EQUAL(0, R.rows());
EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension);
@ -50,7 +50,8 @@ TEST(SOn, SO0) {
}
//******************************************************************************
TEST(SOn, SO5) {
// Test dynamic with n=5
TEST(SOn, SOn5) {
const auto R = SOn(5);
EXPECT_LONGS_EQUAL(5, R.rows());
EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension);
@ -59,6 +60,28 @@ TEST(SOn, SO5) {
EXPECT_LONGS_EQUAL(10, traits<SOn>::GetDimension(R));
}
//******************************************************************************
// Test fixed with n=2
TEST(SOn, SO0) {
const auto R = SO<2>();
EXPECT_LONGS_EQUAL(2, R.rows());
EXPECT_LONGS_EQUAL(1, SO<2>::dimension);
EXPECT_LONGS_EQUAL(1, SO<2>::Dim());
EXPECT_LONGS_EQUAL(1, R.dim());
EXPECT_LONGS_EQUAL(1, traits<SO<2>>::GetDimension(R));
}
//******************************************************************************
// Test fixed with n=5
TEST(SOn, SO5) {
const auto R = SO<5>();
EXPECT_LONGS_EQUAL(5, R.rows());
EXPECT_LONGS_EQUAL(10, SO<5>::dimension);
EXPECT_LONGS_EQUAL(10, SO<5>::Dim());
EXPECT_LONGS_EQUAL(10, R.dim());
EXPECT_LONGS_EQUAL(10, traits<SO<5>>::GetDimension(R));
}
//******************************************************************************
TEST(SOn, Concept) {
BOOST_CONCEPT_ASSERT((IsGroup<SOn>));
@ -105,29 +128,29 @@ TEST(SOn, HatVee) {
EXPECT(assert_equal((Vector)v.head<1>(), SOn::Vee(actual2)));
Matrix expected3(3, 3);
expected3 << 0, -3, 2, //
3, 0, -1, //
-2, 1, 0;
expected3 << 0, -3, 2, //
3, 0, -1, //
-2, 1, 0;
const auto actual3 = SOn::Hat(v.head<3>());
EXPECT(assert_equal(expected3, actual3));
EXPECT(assert_equal(skewSymmetric(1, 2, 3), actual3));
EXPECT(assert_equal((Vector)v.head<3>(), SOn::Vee(actual3)));
Matrix expected4(4, 4);
expected4 << 0, -6, 5, 3, //
6, 0, -4, -2, //
-5, 4, 0, 1, //
-3, 2, -1, 0;
expected4 << 0, -6, 5, 3, //
6, 0, -4, -2, //
-5, 4, 0, 1, //
-3, 2, -1, 0;
const auto actual4 = SOn::Hat(v.head<6>());
EXPECT(assert_equal(expected4, actual4));
EXPECT(assert_equal((Vector)v.head<6>(), SOn::Vee(actual4)));
Matrix expected5(5, 5);
expected5 << 0,-10, 9, 7, -4, //
10, 0, -8, -6, 3, //
-9, 8, 0, 5, -2, //
-7, 6, -5, 0, 1, //
4, -3, 2, -1, 0;
expected5 << 0, -10, 9, 7, -4, //
10, 0, -8, -6, 3, //
-9, 8, 0, 5, -2, //
-7, 6, -5, 0, 1, //
4, -3, 2, -1, 0;
const auto actual5 = SOn::Hat(v);
EXPECT(assert_equal(expected5, actual5));
EXPECT(assert_equal((Vector)v, SOn::Vee(actual5)));
@ -159,6 +182,22 @@ TEST(SOn, RetractLocal) {
CHECK(assert_equal(v1, SOn::ChartAtOrigin::Local(Q1), 1e-7));
}
//******************************************************************************
Matrix RetractJacobian(size_t n) { return SOn::VectorizedGenerators(n); }
/// Test Jacobian of Retract at origin
TEST(SOn, RetractJacobian) {
Matrix actualH = RetractJacobian(3);
boost::function<Matrix(const Vector &)> h = [](const Vector &v) {
return SOn::ChartAtOrigin::Retract(v).matrix();
};
Vector3 v;
v.setZero();
const Matrix expectedH = numericalDerivative11<Matrix, Vector, 3>(h, v, 1e-5);
CHECK(assert_equal(expectedH, actualH));
}
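The check above validates an analytic Jacobian of the retraction against a numerical derivative taken at the origin. A generic central-difference sketch of that idea in plain Eigen, independent of GTSAM's numericalDerivative11 (function and step size are placeholders):

#include <Eigen/Dense>
#include <functional>

// Numerically approximate J = df/dx at x with central differences.
Eigen::MatrixXd numericalJacobian(
    const std::function<Eigen::VectorXd(const Eigen::VectorXd&)>& f,
    const Eigen::VectorXd& x, double h = 1e-5) {
  const Eigen::VectorXd fx = f(x);
  Eigen::MatrixXd J(fx.size(), x.size());
  for (int j = 0; j < x.size(); ++j) {
    Eigen::VectorXd xp = x, xm = x;
    xp(j) += h;
    xm(j) -= h;
    J.col(j) = (f(xp) - f(xm)) / (2.0 * h);  // symmetric difference in direction j
  }
  return J;
}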
//******************************************************************************
TEST(SOn, vec) {
Vector10 v;
@ -166,11 +205,28 @@ TEST(SOn, vec) {
SOn Q = SOn::ChartAtOrigin::Retract(v);
Matrix actualH;
const Vector actual = Q.vec(actualH);
boost::function<Vector(const SOn&)> h = [](const SOn& Q) { return Q.vec(); };
boost::function<Vector(const SOn &)> h = [](const SOn &Q) { return Q.vec(); };
const Matrix H = numericalDerivative11<Vector, SOn, 10>(h, Q, 1e-5);
CHECK(assert_equal(H, actualH));
}
//******************************************************************************
TEST(SOn, VectorizedGenerators) {
// Default fixed
auto actual2 = SO<2>::VectorizedGenerators();
CHECK(actual2.rows()==4 && actual2.cols()==1)
// Specialized
auto actual3 = SO<3>::VectorizedGenerators();
CHECK(actual3.rows()==9 && actual3.cols()==3)
auto actual4 = SO<4>::VectorizedGenerators();
CHECK(actual4.rows()==16 && actual4.cols()==6)
// Dynamic
auto actual5 = SOn::VectorizedGenerators(5);
CHECK(actual5.rows()==25 && actual5.cols()==10)
}
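All four shape checks above follow one rule: the generators of SO(n) are n x n matrices, each vectorized into n^2 entries, and there are n(n-1)/2 of them, so the stacked result is n^2 rows by n(n-1)/2 columns (e.g. 25 x 10 for n = 5). A one-line helper expressing that expectation (illustrative, not GTSAM API):

#include <utility>

// Expected (rows, cols) of the stacked, vectorized generators of SO(n).
std::pair<int, int> vectorizedGeneratorsShape(int n) {
  return {n * n, n * (n - 1) / 2};
}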
//******************************************************************************
int main() {
TestResult tr;


@ -123,27 +123,6 @@ std::pair<NonlinearFactorGraph, Values> triangulationGraph(
return std::make_pair(graph, values);
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// DEPRECATED: PinholeCamera specific version
template<class CALIBRATION>
Point3 triangulateNonlinear(
const CameraSet<PinholeCamera<CALIBRATION> >& cameras,
const Point2Vector& measurements, const Point3& initialEstimate) {
return triangulateNonlinear<PinholeCamera<CALIBRATION> > //
(cameras, measurements, initialEstimate);
}
/// DEPRECATED: PinholeCamera specific version
template<class CALIBRATION>
std::pair<NonlinearFactorGraph, Values> triangulationGraph(
const CameraSet<PinholeCamera<CALIBRATION> >& cameras,
const Point2Vector& measurements, Key landmarkKey,
const Point3& initialEstimate) {
return triangulationGraph<PinholeCamera<CALIBRATION> > //
(cameras, measurements, landmarkKey, initialEstimate);
}
#endif
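With the deprecated PinholeCamera-specific overloads removed above, callers name the camera type explicitly in the templated function, exactly as the deleted wrappers themselves did. A hedged usage sketch (calibration type and variable names are illustrative):

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/triangulation.h>

using namespace gtsam;

Point3 refine(const CameraSet<PinholeCamera<Cal3_S2>>& cameras,
              const Point2Vector& measurements,
              const Point3& initialEstimate) {
  // The camera type is now spelled out instead of being deduced from CALIBRATION.
  return triangulateNonlinear<PinholeCamera<Cal3_S2>>(cameras, measurements,
                                                      initialEstimate);
}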
/**
* Optimize for triangulation
* @param graph nonlinear factors for projection


@ -249,25 +249,6 @@ namespace gtsam {
// Friend JunctionTree because it directly fills roots and nodes index.
template<class BAYESRTEE, class GRAPH> friend class EliminatableClusterTree;
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
public:
/// @name Deprecated
/// @{
void removePath(sharedClique clique, BayesNetType& bn, Cliques& orphans) {
removePath(clique, &bn, &orphans);
}
void removeTop(const KeyVector& keys, BayesNetType& bn, Cliques& orphans) {
removeTop(keys, &bn, &orphans);
}
void getCliqueData(BayesTreeCliqueData& stats, sharedClique clique) const {
getCliqueData(clique, &stats);
}
void addFactorsToGraph(FactorGraph<FactorType>& graph) const{
addFactorsToGraph(& graph);
}
/// @}
#endif
private:
/** Serialization function */
friend class boost::serialization::access;


@ -91,6 +91,7 @@ namespace gtsam {
template<class DERIVED, class FACTORGRAPH>
size_t BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::numCachedSeparatorMarginals() const
{
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
if (!cachedSeparatorMarginal_)
return 0;
@ -144,6 +145,7 @@ namespace gtsam {
typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(
Eliminate function) const {
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
gttic(BayesTreeCliqueBase_separatorMarginal);
// Check if the Separator marginal was already calculated
if (!cachedSeparatorMarginal_) {
@ -206,6 +208,8 @@ namespace gtsam {
// When a shortcut is requested, all of the shortcuts between it and the
// root are also generated. So, if this clique's cached shortcut is set,
// recursively call over all child cliques. Otherwise, it is unnecessary.
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
if (cachedSeparatorMarginal_) {
for(derived_ptr& child: children) {
child->deleteCachedShortcuts();


@ -24,6 +24,7 @@
#include <boost/optional.hpp>
#include <string>
#include <mutex>
namespace gtsam {
@ -75,10 +76,28 @@ namespace gtsam {
/** Construct from a conditional, leaving parent and child pointers uninitialized */
BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {}
/** Shallow copy constructor */
BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {}
/** Shallow copy assignment operator */
BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) {
conditional_ = c.conditional_;
parent_ = c.parent_;
children = c.children;
problemSize_ = c.problemSize_;
is_root = c.is_root;
return *this;
}
/// @}
/// This stores the Cached separator margnal P(S)
/// This stores the Cached separator marginal P(S)
mutable boost::optional<FactorGraphType> cachedSeparatorMarginal_;
/// This protects the cached separator marginal P(S) from concurrent reads/writes,
/// as many of the functions which access it are const (hence the mutable),
/// giving the false impression that these const functions are thread-safe,
/// which is not true due to these mutable values. This is fixed by applying this mutex.
mutable std::mutex cachedSeparatorMarginalMutex_;
public:
sharedConditional conditional_;
@ -144,7 +163,9 @@ namespace gtsam {
void deleteCachedShortcuts();
const boost::optional<FactorGraphType>& cachedSeparatorMarginal() const {
return cachedSeparatorMarginal_; }
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
return cachedSeparatorMarginal_;
}
friend class BayesTree<DerivedType>;
@ -159,7 +180,10 @@ namespace gtsam {
KeyVector shortcut_indices(const derived_ptr& B, const FactorGraphType& p_Cp_B) const;
/** Non-recursive delete cached shortcuts and marginals - internal only. */
void deleteCachedShortcutsNonRecursive() { cachedSeparatorMarginal_ = boost::none; }
void deleteCachedShortcutsNonRecursive() {
std::lock_guard<std::mutex> marginalLock(cachedSeparatorMarginalMutex_);
cachedSeparatorMarginal_ = boost::none;
}
private:
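Two things happen in the header above: a mutable std::mutex now guards the mutable cached separator marginal inside const accessors, and explicit shallow copy operations are supplied because std::mutex is neither copyable nor movable, so the compiler-generated copies disappear once the mutex member is added. A minimal sketch of the same pattern outside GTSAM (class and member names are illustrative):

#include <mutex>
#include <boost/optional.hpp>

class CachedValue {
 public:
  CachedValue() = default;
  // std::mutex is not copyable, so copy only the payload, never the mutex
  // (a fully defensive copy would also lock other.mutex_ while reading).
  CachedValue(const CachedValue& other) : cache_(other.cache_) {}
  CachedValue& operator=(const CachedValue& other) {
    cache_ = other.cache_;
    return *this;
  }

  // const method that may fill the cache: guard every access to cache_.
  double get() const {
    std::lock_guard<std::mutex> lock(mutex_);
    if (!cache_) cache_ = expensiveCompute();
    return *cache_;
  }

  void invalidate() {
    std::lock_guard<std::mutex> lock(mutex_);
    cache_ = boost::none;
  }

 private:
  static double expensiveCompute() { return 42.0; }
  mutable boost::optional<double> cache_;
  mutable std::mutex mutex_;
};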


@ -72,17 +72,6 @@ class ISAM : public BAYESTREE {
const Eliminate& function = EliminationTraitsType::DefaultEliminate);
/// @}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
void update_internal(
const FactorGraphType& newFactors, Cliques& orphans,
const Eliminate& function = EliminationTraitsType::DefaultEliminate) {
updateInternal(newFactors, &orphans, function);
}
/// @}
#endif
};
} // namespace gtsam


@ -41,6 +41,7 @@ namespace gtsam {
typedef boost::shared_ptr<This> shared_ptr;
typedef boost::weak_ptr<This> weak_ptr;
GaussianBayesTreeClique() {}
virtual ~GaussianBayesTreeClique() {}
GaussianBayesTreeClique(const boost::shared_ptr<GaussianConditional>& conditional) : Base(conditional) {}
};
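A virtual destructor like the one added above ensures derived cliques are destroyed correctly when owned and deleted through a pointer to a base type; without one, deleting through a base pointer is undefined behavior. A minimal illustration with placeholder types (not GTSAM classes):

#include <memory>

struct CliqueBase {
  virtual ~CliqueBase() = default;  // ensures the derived destructor runs
};

struct GaussianClique : CliqueBase {
  ~GaussianClique() override = default;  // would be skipped without the virtual base dtor
};

int main() {
  // Destruction through the base pointer is well-defined only because ~CliqueBase is virtual.
  std::unique_ptr<CliqueBase> clique = std::make_unique<GaussianClique>();
  return 0;
}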


@ -128,17 +128,9 @@ namespace gtsam {
/** Scale the values in \c gy according to the sigmas for the frontal variables in this
* conditional. */
void scaleFrontalsBySigma(VectorValues& gy) const;
// __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const; // FIXME: depreciated flag doesn't appear to exist?
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
constABlock get_R() const { return R(); }
constABlock get_S() const { return S(); }
constABlock get_S(const_iterator it) const { return S(it); }
const constBVector get_d() const { return d(); }
/// @}
#endif
// FIXME: deprecated flag doesn't appear to exist?
// __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const;
private:
/** Serialization function */


@ -354,16 +354,7 @@ namespace gtsam {
/// Solve the system A'*A delta = A'*b in-place, return delta as VectorValues
VectorValues solve();
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
const SymmetricBlockMatrix& matrixObject() const { return info_; }
/// @}
#endif
private:
private:
/// Allocate for given scatter pattern
void Allocate(const Scatter& scatter);


@ -421,21 +421,11 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
/* ************************************************************************* */
void JacobianFactor::print(const string& s,
const KeyFormatter& formatter) const {
static const Eigen::IOFormat matlab(
Eigen::StreamPrecision, // precision
0, // flags
" ", // coeffSeparator
";\n", // rowSeparator
"\t", // rowPrefix
"", // rowSuffix
"[\n", // matPrefix
"\n ]" // matSuffix
);
if (!s.empty())
cout << s << "\n";
for (const_iterator key = begin(); key != end(); ++key) {
cout << boost::format(" A[%1%] = ") % formatter(*key);
cout << getA(key).format(matlab) << endl;
cout << getA(key).format(matlabFormat()) << endl;
}
cout << formatMatrixIndented(" b = ", getb(), true) << "\n";
if (model_)
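The print refactor above drops a locally constructed Eigen::IOFormat in favor of a shared matlabFormat() helper. For reference, a self-contained sketch of a MATLAB-style formatter equivalent to the removed literal (the free-function name here is illustrative):

#include <Eigen/Dense>
#include <iostream>

// MATLAB-style formatting: rows separated by ";\n", whole matrix wrapped in [ ].
Eigen::IOFormat matlabStyle() {
  return Eigen::IOFormat(Eigen::StreamPrecision,
                         0,        // flags
                         " ",      // coefficient separator
                         ";\n",    // row separator
                         "\t",     // row prefix
                         "",       // row suffix
                         "[\n",    // matrix prefix
                         "\n ]");  // matrix suffix
}

int main() {
  Eigen::Matrix2d A;
  A << 1, 2, 3, 4;
  std::cout << A.format(matlabStyle()) << std::endl;
  return 0;
}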

Some files were not shown because too many files have changed in this diff.