Added Spectra 0.9.0 to 3rdparty
parent ac0150ba5d
commit 7cfc5c3522
LICENSE | 2
@@ -23,3 +23,5 @@ ordering library
 - Included unmodified in gtsam/3rdparty/metis
 - Licenced under Apache License v 2.0, provided in
 gtsam/3rdparty/metis/LICENSE.txt
+- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK.
+- Licenced under MPL2, provided at https://github.com/yixuan/spectra
@@ -1,7 +1,8 @@
-# install CCOLAMD headers
+# install CCOLAMD and SuiteSparse_config headers
 install(FILES CCOLAMD/Include/ccolamd.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/CCOLAMD)
+install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/SuiteSparse_config)

 # install Eigen unless System Eigen is used
 if(NOT GTSAM_USE_SYSTEM_EIGEN)
 # Find plain .h files
 file(GLOB_RECURSE eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/Eigen/*.h")
@@ -0,0 +1,482 @@
|
|||
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef GEN_EIGS_BASE_H
|
||||
#define GEN_EIGS_BASE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <vector> // std::vector
|
||||
#include <cmath> // std::abs, std::pow, std::sqrt
|
||||
#include <algorithm> // std::min, std::copy
|
||||
#include <complex> // std::complex, std::conj, std::norm, std::abs
|
||||
#include <stdexcept> // std::invalid_argument
|
||||
|
||||
#include "Util/TypeTraits.h"
|
||||
#include "Util/SelectionRule.h"
|
||||
#include "Util/CompInfo.h"
|
||||
#include "Util/SimpleRandom.h"
|
||||
#include "MatOp/internal/ArnoldiOp.h"
|
||||
#include "LinAlg/UpperHessenbergQR.h"
|
||||
#include "LinAlg/DoubleShiftQR.h"
|
||||
#include "LinAlg/UpperHessenbergEigen.h"
|
||||
#include "LinAlg/Arnoldi.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup EigenSolver
|
||||
///
|
||||
/// This is the base class for general eigen solvers, mainly for internal use.
|
||||
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
|
||||
/// such as GenEigsSolver and GenEigsRealShiftSolver.
|
||||
///
|
||||
template <typename Scalar,
|
||||
int SelectionRule,
|
||||
typename OpType,
|
||||
typename BOpType>
|
||||
class GenEigsBase
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
|
||||
typedef Eigen::Array<bool, Eigen::Dynamic, 1> BoolArray;
|
||||
typedef Eigen::Map<Matrix> MapMat;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
|
||||
typedef std::complex<Scalar> Complex;
|
||||
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
|
||||
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
|
||||
|
||||
typedef ArnoldiOp<Scalar, OpType, BOpType> ArnoldiOpType;
|
||||
typedef Arnoldi<Scalar, ArnoldiOpType> ArnoldiFac;
|
||||
|
||||
protected:
|
||||
// clang-format off
|
||||
OpType* m_op; // object to conduct matrix operation,
|
||||
// e.g. matrix-vector product
|
||||
const Index m_n; // dimension of matrix A
|
||||
const Index m_nev; // number of eigenvalues requested
|
||||
const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method
|
||||
Index m_nmatop; // number of matrix operations called
|
||||
Index m_niter; // number of restarting iterations
|
||||
|
||||
ArnoldiFac m_fac; // Arnoldi factorization
|
||||
|
||||
ComplexVector m_ritz_val; // Ritz values
|
||||
ComplexMatrix m_ritz_vec; // Ritz vectors
|
||||
ComplexVector m_ritz_est; // last row of m_ritz_vec
|
||||
|
||||
private:
|
||||
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
|
||||
int m_info; // status of the computation
|
||||
|
||||
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
|
||||
// ~= 1e-307 for the "double" type
|
||||
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
|
||||
const Scalar m_eps23; // m_eps^(2/3), used to test the convergence
|
||||
// clang-format on
|
||||
|
||||
// Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part
|
||||
// Complex Ritz values have exact conjugate pairs
|
||||
// So we use exact tests here
|
||||
static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); }
|
||||
static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); }
|
||||
|
||||
// Implicitly restarted Arnoldi factorization
|
||||
void restart(Index k)
|
||||
{
|
||||
using std::norm;
|
||||
|
||||
if (k >= m_ncv)
|
||||
return;
|
||||
|
||||
DoubleShiftQR<Scalar> decomp_ds(m_ncv);
|
||||
UpperHessenbergQR<Scalar> decomp_hb(m_ncv);
|
||||
Matrix Q = Matrix::Identity(m_ncv, m_ncv);
|
||||
|
||||
for (Index i = k; i < m_ncv; i++)
|
||||
{
|
||||
if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1]))
|
||||
{
|
||||
// H - mu * I = Q1 * R1
|
||||
// H <- R1 * Q1 + mu * I = Q1' * H * Q1
|
||||
// H - conj(mu) * I = Q2 * R2
|
||||
// H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2
|
||||
//
|
||||
// (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R
|
||||
const Scalar s = Scalar(2) * m_ritz_val[i].real();
|
||||
const Scalar t = norm(m_ritz_val[i]);
|
||||
|
||||
decomp_ds.compute(m_fac.matrix_H(), s, t);
|
||||
|
||||
// Q -> Q * Qi
|
||||
decomp_ds.apply_YQ(Q);
|
||||
// H -> Q'HQ
|
||||
// Matrix Q = Matrix::Identity(m_ncv, m_ncv);
|
||||
// decomp_ds.apply_YQ(Q);
|
||||
// m_fac_H = Q.transpose() * m_fac_H * Q;
|
||||
m_fac.compress_H(decomp_ds);
|
||||
|
||||
i++;
|
||||
}
|
||||
else
|
||||
{
|
||||
// QR decomposition of H - mu * I, mu is real
|
||||
decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real());
|
||||
|
||||
// Q -> Q * Qi
|
||||
decomp_hb.apply_YQ(Q);
|
||||
// H -> Q'HQ = RQ + mu * I
|
||||
m_fac.compress_H(decomp_hb);
|
||||
}
|
||||
}
|
||||
|
||||
m_fac.compress_V(Q);
|
||||
m_fac.factorize_from(k, m_ncv, m_nmatop);
|
||||
|
||||
retrieve_ritzpair();
|
||||
}
|
||||
|
||||
// Calculates the number of converged Ritz values
|
||||
Index num_converged(Scalar tol)
|
||||
{
|
||||
// thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value
|
||||
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23);
|
||||
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
|
||||
// Converged "wanted" Ritz values
|
||||
m_ritz_conv = (resid < thresh);
|
||||
|
||||
return m_ritz_conv.cast<Index>().sum();
|
||||
}
|
||||
|
||||
// Returns the adjusted nev for restarting
|
||||
Index nev_adjusted(Index nconv)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
Index nev_new = m_nev;
|
||||
for (Index i = m_nev; i < m_ncv; i++)
|
||||
if (abs(m_ritz_est[i]) < m_near_0)
|
||||
nev_new++;
|
||||
|
||||
// Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK
|
||||
nev_new += std::min(nconv, (m_ncv - nev_new) / 2);
|
||||
if (nev_new == 1 && m_ncv >= 6)
|
||||
nev_new = m_ncv / 2;
|
||||
else if (nev_new == 1 && m_ncv > 3)
|
||||
nev_new = 2;
|
||||
|
||||
if (nev_new > m_ncv - 2)
|
||||
nev_new = m_ncv - 2;
|
||||
|
||||
// Increase nev by one if ritz_val[nev - 1] and
|
||||
// ritz_val[nev] are conjugate pairs
|
||||
if (is_complex(m_ritz_val[nev_new - 1]) &&
|
||||
is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new]))
|
||||
{
|
||||
nev_new++;
|
||||
}
|
||||
|
||||
return nev_new;
|
||||
}
|
||||
|
||||
// Retrieves and sorts Ritz values and Ritz vectors
|
||||
void retrieve_ritzpair()
|
||||
{
|
||||
UpperHessenbergEigen<Scalar> decomp(m_fac.matrix_H());
|
||||
const ComplexVector& evals = decomp.eigenvalues();
|
||||
ComplexMatrix evecs = decomp.eigenvectors();
|
||||
|
||||
SortEigenvalue<Complex, SelectionRule> sorting(evals.data(), evals.size());
|
||||
std::vector<int> ind = sorting.index();
|
||||
|
||||
// Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
|
||||
for (Index i = 0; i < m_ncv; i++)
|
||||
{
|
||||
m_ritz_val[i] = evals[ind[i]];
|
||||
m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
|
||||
}
|
||||
for (Index i = 0; i < m_nev; i++)
|
||||
{
|
||||
m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
// Sorts the first nev Ritz pairs in the specified order
|
||||
// This is used to return the final results
|
||||
virtual void sort_ritzpair(int sort_rule)
|
||||
{
|
||||
// First make sure that we have a valid index vector
|
||||
SortEigenvalue<Complex, LARGEST_MAGN> sorting(m_ritz_val.data(), m_nev);
|
||||
std::vector<int> ind = sorting.index();
|
||||
|
||||
switch (sort_rule)
|
||||
{
|
||||
case LARGEST_MAGN:
|
||||
break;
|
||||
case LARGEST_REAL:
|
||||
{
|
||||
SortEigenvalue<Complex, LARGEST_REAL> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
case LARGEST_IMAG:
|
||||
{
|
||||
SortEigenvalue<Complex, LARGEST_IMAG> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
case SMALLEST_MAGN:
|
||||
{
|
||||
SortEigenvalue<Complex, SMALLEST_MAGN> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
case SMALLEST_REAL:
|
||||
{
|
||||
SortEigenvalue<Complex, SMALLEST_REAL> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
case SMALLEST_IMAG:
|
||||
{
|
||||
SortEigenvalue<Complex, SMALLEST_IMAG> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw std::invalid_argument("unsupported sorting rule");
|
||||
}
|
||||
|
||||
ComplexVector new_ritz_val(m_ncv);
|
||||
ComplexMatrix new_ritz_vec(m_ncv, m_nev);
|
||||
BoolArray new_ritz_conv(m_nev);
|
||||
|
||||
for (Index i = 0; i < m_nev; i++)
|
||||
{
|
||||
new_ritz_val[i] = m_ritz_val[ind[i]];
|
||||
new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
|
||||
new_ritz_conv[i] = m_ritz_conv[ind[i]];
|
||||
}
|
||||
|
||||
m_ritz_val.swap(new_ritz_val);
|
||||
m_ritz_vec.swap(new_ritz_vec);
|
||||
m_ritz_conv.swap(new_ritz_conv);
|
||||
}
|
||||
|
||||
public:
|
||||
/// \cond
|
||||
|
||||
GenEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) :
|
||||
m_op(op),
|
||||
m_n(m_op->rows()),
|
||||
m_nev(nev),
|
||||
m_ncv(ncv > m_n ? m_n : ncv),
|
||||
m_nmatop(0),
|
||||
m_niter(0),
|
||||
m_fac(ArnoldiOpType(op, Bop), m_ncv),
|
||||
m_info(NOT_COMPUTED),
|
||||
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
|
||||
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
|
||||
m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3))
|
||||
{
|
||||
if (nev < 1 || nev > m_n - 2)
|
||||
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix");
|
||||
|
||||
if (ncv < nev + 2 || ncv > m_n)
|
||||
throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix");
|
||||
}
|
||||
|
||||
///
|
||||
/// Virtual destructor
|
||||
///
|
||||
virtual ~GenEigsBase() {}
|
||||
|
||||
/// \endcond
|
||||
|
||||
///
|
||||
/// Initializes the solver by providing an initial residual vector.
|
||||
///
|
||||
/// \param init_resid Pointer to the initial residual vector.
|
||||
///
|
||||
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
|
||||
/// to find eigenvalues. This function allows the user to provide the initial
|
||||
/// residual vector.
|
||||
///
|
||||
void init(const Scalar* init_resid)
|
||||
{
|
||||
// Reset all matrices/vectors to zero
|
||||
m_ritz_val.resize(m_ncv);
|
||||
m_ritz_vec.resize(m_ncv, m_nev);
|
||||
m_ritz_est.resize(m_ncv);
|
||||
m_ritz_conv.resize(m_nev);
|
||||
|
||||
m_ritz_val.setZero();
|
||||
m_ritz_vec.setZero();
|
||||
m_ritz_est.setZero();
|
||||
m_ritz_conv.setZero();
|
||||
|
||||
m_nmatop = 0;
|
||||
m_niter = 0;
|
||||
|
||||
// Initialize the Arnoldi factorization
|
||||
MapConstVec v0(init_resid, m_n);
|
||||
m_fac.init(v0, m_nmatop);
|
||||
}
|
||||
|
||||
///
|
||||
/// Initializes the solver by providing a random initial residual vector.
|
||||
///
|
||||
/// This overloaded function generates a random initial residual vector
|
||||
/// (with a fixed random seed) for the algorithm. Elements in the vector
|
||||
/// follow independent Uniform(-0.5, 0.5) distribution.
|
||||
///
|
||||
void init()
|
||||
{
|
||||
SimpleRandom<Scalar> rng(0);
|
||||
Vector init_resid = rng.random_vec(m_n);
|
||||
init(init_resid.data());
|
||||
}
|
||||
|
||||
///
|
||||
/// Conducts the major computation procedure.
|
||||
///
|
||||
/// \param maxit Maximum number of iterations allowed in the algorithm.
|
||||
/// \param tol Precision parameter for the calculated eigenvalues.
|
||||
/// \param sort_rule Rule to sort the eigenvalues and eigenvectors.
|
||||
/// Supported values are
|
||||
/// `Spectra::LARGEST_MAGN`, `Spectra::LARGEST_REAL`,
|
||||
/// `Spectra::LARGEST_IMAG`, `Spectra::SMALLEST_MAGN`,
|
||||
/// `Spectra::SMALLEST_REAL` and `Spectra::SMALLEST_IMAG`,
|
||||
/// for example `LARGEST_MAGN` indicates that eigenvalues
|
||||
/// with largest magnitude come first.
|
||||
/// Note that this argument is only used to
|
||||
/// **sort** the final result, and the **selection** rule
|
||||
/// (e.g. selecting the largest or smallest eigenvalues in the
|
||||
/// full spectrum) is specified by the template parameter
|
||||
/// `SelectionRule` of GenEigsSolver.
|
||||
///
|
||||
/// \return Number of converged eigenvalues.
|
||||
///
|
||||
Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_MAGN)
|
||||
{
|
||||
// The m-step Arnoldi factorization
|
||||
m_fac.factorize_from(1, m_ncv, m_nmatop);
|
||||
retrieve_ritzpair();
|
||||
// Restarting
|
||||
Index i, nconv = 0, nev_adj;
|
||||
for (i = 0; i < maxit; i++)
|
||||
{
|
||||
nconv = num_converged(tol);
|
||||
if (nconv >= m_nev)
|
||||
break;
|
||||
|
||||
nev_adj = nev_adjusted(nconv);
|
||||
restart(nev_adj);
|
||||
}
|
||||
// Sorting results
|
||||
sort_ritzpair(sort_rule);
|
||||
|
||||
m_niter += i + 1;
|
||||
m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING;
|
||||
|
||||
return std::min(m_nev, nconv);
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the status of the computation.
|
||||
/// The full list of enumeration values can be found in \ref Enumerations.
|
||||
///
|
||||
int info() const { return m_info; }
|
||||
|
||||
///
|
||||
/// Returns the number of iterations used in the computation.
|
||||
///
|
||||
Index num_iterations() const { return m_niter; }
|
||||
|
||||
///
|
||||
/// Returns the number of matrix operations used in the computation.
|
||||
///
|
||||
Index num_operations() const { return m_nmatop; }
|
||||
|
||||
///
|
||||
/// Returns the converged eigenvalues.
|
||||
///
|
||||
/// \return A complex-valued vector containing the eigenvalues.
|
||||
/// Returned vector type will be `Eigen::Vector<std::complex<Scalar>, ...>`, depending on
|
||||
/// the template parameter `Scalar` defined.
|
||||
///
|
||||
ComplexVector eigenvalues() const
|
||||
{
|
||||
const Index nconv = m_ritz_conv.cast<Index>().sum();
|
||||
ComplexVector res(nconv);
|
||||
|
||||
if (!nconv)
|
||||
return res;
|
||||
|
||||
Index j = 0;
|
||||
for (Index i = 0; i < m_nev; i++)
|
||||
{
|
||||
if (m_ritz_conv[i])
|
||||
{
|
||||
res[j] = m_ritz_val[i];
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the eigenvectors associated with the converged eigenvalues.
|
||||
///
|
||||
/// \param nvec The number of eigenvectors to return.
|
||||
///
|
||||
/// \return A complex-valued matrix containing the eigenvectors.
|
||||
/// Returned matrix type will be `Eigen::Matrix<std::complex<Scalar>, ...>`,
|
||||
/// depending on the template parameter `Scalar` defined.
|
||||
///
|
||||
ComplexMatrix eigenvectors(Index nvec) const
|
||||
{
|
||||
const Index nconv = m_ritz_conv.cast<Index>().sum();
|
||||
nvec = std::min(nvec, nconv);
|
||||
ComplexMatrix res(m_n, nvec);
|
||||
|
||||
if (!nvec)
|
||||
return res;
|
||||
|
||||
ComplexMatrix ritz_vec_conv(m_ncv, nvec);
|
||||
Index j = 0;
|
||||
for (Index i = 0; i < m_nev && j < nvec; i++)
|
||||
{
|
||||
if (m_ritz_conv[i])
|
||||
{
|
||||
ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns all converged eigenvectors.
|
||||
///
|
||||
ComplexMatrix eigenvectors() const
|
||||
{
|
||||
return eigenvectors(m_nev);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // GEN_EIGS_BASE_H
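The compute() documentation above draws a distinction worth illustrating: the SelectionRule template parameter controls which part of the spectrum the iteration targets, while the sort_rule argument only reorders the converged results. Below is a minimal sketch of that distinction using the GenEigsSolver and DenseGenMatProd wrappers added later in this commit; the matrix contents and the (nev, ncv) = (3, 6) choice are illustrative assumptions.

#include <Eigen/Core>
#include <Spectra/GenEigsSolver.h>
#include <Spectra/MatOp/DenseGenMatProd.h>
#include <iostream>

int main()
{
    // Illustrative 10 x 10 general (non-symmetric) matrix
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
    Spectra::DenseGenMatProd<double> op(M);

    // SelectionRule = LARGEST_MAGN: the iteration targets the three
    // eigenvalues of largest magnitude
    Spectra::GenEigsSolver<double, Spectra::LARGEST_MAGN,
                           Spectra::DenseGenMatProd<double> > eigs(&op, 3, 6);

    eigs.init();
    // sort_rule = LARGEST_REAL: only the ordering of the returned results changes
    int nconv = eigs.compute(1000, 1e-10, Spectra::LARGEST_REAL);

    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << nconv << " converged eigenvalues:\n"
                  << eigs.eigenvalues() << std::endl;
    return 0;
}

Here LARGEST_MAGN drives the Arnoldi iteration, and LARGEST_REAL only affects the order in which eigenvalues() and eigenvectors() report the converged pairs.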
|
|
@@ -0,0 +1,157 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
|
||||
#define GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
#include "GenEigsBase.h"
|
||||
#include "Util/SelectionRule.h"
|
||||
#include "MatOp/DenseGenComplexShiftSolve.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup EigenSolver
|
||||
///
|
||||
/// This class implements the eigen solver for general real matrices with
|
||||
/// a complex shift value in the **shift-and-invert mode**. The background
|
||||
/// knowledge of the shift-and-invert mode can be found in the documentation
|
||||
/// of the SymEigsShiftSolver class.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
/// \tparam SelectionRule An enumeration value indicating the selection rule of
|
||||
/// the shifted-and-inverted eigenvalues.
|
||||
/// The full list of enumeration values can be found in
|
||||
/// \ref Enumerations.
|
||||
/// \tparam OpType The name of the matrix operation class. Users could either
|
||||
/// use the DenseGenComplexShiftSolve wrapper class, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseGenComplexShiftSolve.
|
||||
///
|
||||
template <typename Scalar = double,
|
||||
int SelectionRule = LARGEST_MAGN,
|
||||
typename OpType = DenseGenComplexShiftSolve<double> >
|
||||
class GenEigsComplexShiftSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef std::complex<Scalar> Complex;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
|
||||
|
||||
const Scalar m_sigmar;
|
||||
const Scalar m_sigmai;
|
||||
|
||||
// First transform back the Ritz values, and then sort
|
||||
void sort_ritzpair(int sort_rule)
|
||||
{
|
||||
using std::abs;
|
||||
using std::sqrt;
|
||||
using std::norm;
|
||||
|
||||
// The eigenvalues we get from the iteration are
//     nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma)))
// So the eigenvalues of the original problem are
//                        1 \pm sqrt(1 - 4 * nu^2 * sigmai^2)
//     lambda = sigmar + -------------------------------------
//                                      2 * nu
|
||||
// We need to pick the correct root
|
||||
// Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj
|
||||
// and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj
|
||||
// where r is any shift value.
|
||||
// We can use this identity to determine lambdaj
|
||||
//
|
||||
// op(v) computes Re(inv(A - r * I) * v) for any real v
|
||||
// If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj),
|
||||
// then op(vj) = op(a) + op(b) * i
|
||||
// By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine
|
||||
// which one is the correct root
|
||||
|
||||
// Select a random shift value
|
||||
SimpleRandom<Scalar> rng(0);
|
||||
const Scalar shiftr = rng.random() * m_sigmar + rng.random();
|
||||
const Complex shift = Complex(shiftr, Scalar(0));
|
||||
this->m_op->set_shift(shiftr, Scalar(0));
|
||||
|
||||
// Calculate inv(A - r * I) * vj
|
||||
Vector v_real(this->m_n), v_imag(this->m_n), OPv_real(this->m_n), OPv_imag(this->m_n);
|
||||
const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
|
||||
for (Index i = 0; i < this->m_nev; i++)
|
||||
{
|
||||
v_real.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).real();
|
||||
v_imag.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).imag();
|
||||
this->m_op->perform_op(v_real.data(), OPv_real.data());
|
||||
this->m_op->perform_op(v_imag.data(), OPv_imag.data());
|
||||
|
||||
// Two roots computed from the quadratic equation
|
||||
const Complex nu = this->m_ritz_val[i];
|
||||
const Complex root_part1 = m_sigmar + Scalar(0.5) / nu;
|
||||
const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu;
|
||||
const Complex root1 = root_part1 + root_part2;
|
||||
const Complex root2 = root_part1 - root_part2;
|
||||
|
||||
// Test roots
|
||||
Scalar err1 = Scalar(0), err2 = Scalar(0);
|
||||
for (int k = 0; k < this->m_n; k++)
|
||||
{
|
||||
const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift);
|
||||
const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift);
|
||||
const Complex OPv = Complex(OPv_real[k], OPv_imag[k]);
|
||||
err1 += norm(OPv - rhs1);
|
||||
err2 += norm(OPv - rhs2);
|
||||
}
|
||||
|
||||
const Complex lambdaj = (err1 < err2) ? root1 : root2;
|
||||
this->m_ritz_val[i] = lambdaj;
|
||||
|
||||
if (abs(Eigen::numext::imag(lambdaj)) > eps)
|
||||
{
|
||||
this->m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj);
|
||||
i++;
|
||||
}
|
||||
else
|
||||
{
|
||||
this->m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0));
|
||||
}
|
||||
}
|
||||
|
||||
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
|
||||
}
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create an eigen solver object using the shift-and-invert mode.
|
||||
///
|
||||
/// \param op Pointer to the matrix operation object. This class should implement
|
||||
/// the complex shift-solve operation of \f$A\f$: calculating
|
||||
/// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the DenseGenComplexShiftSolve wrapper class, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseGenComplexShiftSolve.
|
||||
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
|
||||
/// where \f$n\f$ is the size of matrix.
|
||||
/// \param ncv Parameter that controls the convergence speed of the algorithm.
|
||||
/// Typically a larger `ncv` means faster convergence, but it may
|
||||
/// also result in greater memory use and more matrix operations
|
||||
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
|
||||
/// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
|
||||
/// \param sigmar The real part of the shift.
|
||||
/// \param sigmai The imaginary part of the shift.
|
||||
///
|
||||
GenEigsComplexShiftSolver(OpType* op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) :
|
||||
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
|
||||
m_sigmar(sigmar), m_sigmai(sigmai)
|
||||
{
|
||||
this->m_op->set_shift(m_sigmar, m_sigmai);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
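For reference, a hedged usage sketch of the solver defined above, following the constructor signature (op, nev, ncv, sigmar, sigmai) documented in this file; the matrix, the shift 1.0 + 0.5i, and the (nev, ncv) = (3, 6) values are illustrative assumptions.

#include <Eigen/Core>
#include <Spectra/GenEigsComplexShiftSolver.h>
// <Spectra/MatOp/DenseGenComplexShiftSolve.h> is included by the header above
#include <iostream>

int main()
{
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(20, 20);

    // Wrapper computing Re{(A - sigma * I)^{-1} v} for a complex sigma
    Spectra::DenseGenComplexShiftSolve<double> op(M);

    // Shift-and-invert around sigma = 1.0 + 0.5i
    Spectra::GenEigsComplexShiftSolver<double, Spectra::LARGEST_MAGN,
                                       Spectra::DenseGenComplexShiftSolve<double> >
        eigs(&op, 3, 6, 1.0, 0.5);

    eigs.init();
    eigs.compute();

    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;
    return 0;
}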
|
|
@@ -0,0 +1,89 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef GEN_EIGS_REAL_SHIFT_SOLVER_H
|
||||
#define GEN_EIGS_REAL_SHIFT_SOLVER_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
#include "GenEigsBase.h"
|
||||
#include "Util/SelectionRule.h"
|
||||
#include "MatOp/DenseGenRealShiftSolve.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup EigenSolver
|
||||
///
|
||||
/// This class implements the eigen solver for general real matrices with
|
||||
/// a real shift value in the **shift-and-invert mode**. The background
|
||||
/// knowledge of the shift-and-invert mode can be found in the documentation
|
||||
/// of the SymEigsShiftSolver class.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
/// \tparam SelectionRule An enumeration value indicating the selection rule of
|
||||
/// the shifted-and-inverted eigenvalues.
|
||||
/// The full list of enumeration values can be found in
|
||||
/// \ref Enumerations.
|
||||
/// \tparam OpType The name of the matrix operation class. Users could either
|
||||
/// use the wrapper classes such as DenseGenRealShiftSolve and
|
||||
/// SparseGenRealShiftSolve, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseGenRealShiftSolve.
|
||||
///
|
||||
template <typename Scalar = double,
|
||||
int SelectionRule = LARGEST_MAGN,
|
||||
typename OpType = DenseGenRealShiftSolve<double> >
|
||||
class GenEigsRealShiftSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef std::complex<Scalar> Complex;
|
||||
typedef Eigen::Array<Complex, Eigen::Dynamic, 1> ComplexArray;
|
||||
|
||||
const Scalar m_sigma;
|
||||
|
||||
// First transform back the Ritz values, and then sort
|
||||
void sort_ritzpair(int sort_rule)
|
||||
{
|
||||
// The eigenvalues we get from the iteration are nu = 1 / (lambda - sigma)
// So the eigenvalues of the original problem are lambda = 1 / nu + sigma
|
||||
ComplexArray ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma;
|
||||
this->m_ritz_val.head(this->m_nev) = ritz_val_org;
|
||||
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
|
||||
}
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create an eigen solver object using the shift-and-invert mode.
|
||||
///
|
||||
/// \param op Pointer to the matrix operation object. This class should implement
|
||||
/// the shift-solve operation of \f$A\f$: calculating
|
||||
/// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the wrapper class such as DenseGenRealShiftSolve, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseGenRealShiftSolve.
|
||||
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
|
||||
/// where \f$n\f$ is the size of matrix.
|
||||
/// \param ncv Parameter that controls the convergence speed of the algorithm.
|
||||
/// Typically a larger `ncv` means faster convergence, but it may
|
||||
/// also result in greater memory use and more matrix operations
|
||||
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
|
||||
/// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
|
||||
/// \param sigma The real-valued shift.
|
||||
///
|
||||
GenEigsRealShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) :
|
||||
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
|
||||
m_sigma(sigma)
|
||||
{
|
||||
this->m_op->set_shift(m_sigma);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // GEN_EIGS_REAL_SHIFT_SOLVER_H
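Similarly, a minimal sketch for the real-shift solver above; the matrix and the shift sigma = 2.0 are illustrative assumptions. The solver maps the computed nu = 1 / (lambda - sigma) back to lambda internally, as sort_ritzpair() shows.

#include <Eigen/Core>
#include <Spectra/GenEigsRealShiftSolver.h>
// <Spectra/MatOp/DenseGenRealShiftSolve.h> is included by the header above
#include <iostream>

int main()
{
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(20, 20);

    // Wrapper computing (A - sigma * I)^{-1} v for a real sigma
    Spectra::DenseGenRealShiftSolve<double> op(M);

    // Request eigenvalues near sigma = 2.0 via shift-and-invert
    Spectra::GenEigsRealShiftSolver<double, Spectra::LARGEST_MAGN,
                                    Spectra::DenseGenRealShiftSolve<double> > eigs(&op, 3, 6, 2.0);

    eigs.init();
    eigs.compute();

    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;
    return 0;
}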
|
|
@@ -0,0 +1,158 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef GEN_EIGS_SOLVER_H
|
||||
#define GEN_EIGS_SOLVER_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
#include "GenEigsBase.h"
|
||||
#include "Util/SelectionRule.h"
|
||||
#include "MatOp/DenseGenMatProd.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup EigenSolver
|
||||
///
|
||||
/// This class implements the eigen solver for general real matrices, i.e.,
|
||||
/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix.
|
||||
///
|
||||
/// Most of the background information documented in the SymEigsSolver class
|
||||
/// also applies to the GenEigsSolver class here, except that the eigenvalues
|
||||
/// and eigenvectors of a general matrix can now be complex-valued.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
/// \tparam SelectionRule An enumeration value indicating the selection rule of
|
||||
/// the requested eigenvalues, for example `LARGEST_MAGN`
|
||||
/// to retrieve eigenvalues with the largest magnitude.
|
||||
/// The full list of enumeration values can be found in
|
||||
/// \ref Enumerations.
|
||||
/// \tparam OpType The name of the matrix operation class. Users could either
|
||||
/// use the wrapper classes such as DenseGenMatProd and
|
||||
/// SparseGenMatProd, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseGenMatProd.
|
||||
///
|
||||
/// An example that illustrates the usage of GenEigsSolver is given below:
|
||||
///
|
||||
/// \code{.cpp}
|
||||
/// #include <Eigen/Core>
|
||||
/// #include <Spectra/GenEigsSolver.h>
|
||||
/// // <Spectra/MatOp/DenseGenMatProd.h> is implicitly included
|
||||
/// #include <iostream>
|
||||
///
|
||||
/// using namespace Spectra;
|
||||
///
|
||||
/// int main()
|
||||
/// {
|
||||
/// // We are going to calculate the eigenvalues of M
|
||||
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
|
||||
///
|
||||
/// // Construct matrix operation object using the wrapper class
|
||||
/// DenseGenMatProd<double> op(M);
|
||||
///
|
||||
/// // Construct eigen solver object, requesting the largest
|
||||
/// // (in magnitude, or norm) three eigenvalues
|
||||
/// GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd<double> > eigs(&op, 3, 6);
|
||||
///
|
||||
/// // Initialize and compute
|
||||
/// eigs.init();
|
||||
/// int nconv = eigs.compute();
|
||||
///
|
||||
/// // Retrieve results
|
||||
/// Eigen::VectorXcd evalues;
|
||||
/// if(eigs.info() == SUCCESSFUL)
|
||||
/// evalues = eigs.eigenvalues();
|
||||
///
|
||||
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
|
||||
///
|
||||
/// return 0;
|
||||
/// }
|
||||
/// \endcode
|
||||
///
|
||||
/// And also an example for sparse matrices:
|
||||
///
|
||||
/// \code{.cpp}
|
||||
/// #include <Eigen/Core>
|
||||
/// #include <Eigen/SparseCore>
|
||||
/// #include <Spectra/GenEigsSolver.h>
|
||||
/// #include <Spectra/MatOp/SparseGenMatProd.h>
|
||||
/// #include <iostream>
|
||||
///
|
||||
/// using namespace Spectra;
|
||||
///
|
||||
/// int main()
|
||||
/// {
|
||||
/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal,
|
||||
/// // and 3 on the above-main subdiagonal
|
||||
/// const int n = 10;
|
||||
/// Eigen::SparseMatrix<double> M(n, n);
|
||||
/// M.reserve(Eigen::VectorXi::Constant(n, 3));
|
||||
/// for(int i = 0; i < n; i++)
|
||||
/// {
|
||||
/// M.insert(i, i) = 1.0;
|
||||
/// if(i > 0)
|
||||
/// M.insert(i - 1, i) = 3.0;
|
||||
/// if(i < n - 1)
|
||||
/// M.insert(i + 1, i) = 2.0;
|
||||
/// }
|
||||
///
|
||||
/// // Construct matrix operation object using the wrapper class SparseGenMatProd
|
||||
/// SparseGenMatProd<double> op(M);
|
||||
///
|
||||
/// // Construct eigen solver object, requesting the largest three eigenvalues
|
||||
/// GenEigsSolver< double, LARGEST_MAGN, SparseGenMatProd<double> > eigs(&op, 3, 6);
|
||||
///
|
||||
/// // Initialize and compute
|
||||
/// eigs.init();
|
||||
/// int nconv = eigs.compute();
|
||||
///
|
||||
/// // Retrieve results
|
||||
/// Eigen::VectorXcd evalues;
|
||||
/// if(eigs.info() == SUCCESSFUL)
|
||||
/// evalues = eigs.eigenvalues();
|
||||
///
|
||||
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
|
||||
///
|
||||
/// return 0;
|
||||
/// }
|
||||
/// \endcode
|
||||
template <typename Scalar = double,
|
||||
int SelectionRule = LARGEST_MAGN,
|
||||
typename OpType = DenseGenMatProd<double> >
|
||||
class GenEigsSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create a solver object.
|
||||
///
|
||||
/// \param op Pointer to the matrix operation object, which should implement
|
||||
/// the matrix-vector multiplication operation of \f$A\f$:
|
||||
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the wrapper class such as DenseGenMatProd, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseGenMatProd.
|
||||
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
|
||||
/// where \f$n\f$ is the size of matrix.
|
||||
/// \param ncv Parameter that controls the convergence speed of the algorithm.
|
||||
/// Typically a larger `ncv` means faster convergence, but it may
|
||||
/// also result in greater memory use and more matrix operations
|
||||
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
|
||||
/// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
|
||||
///
|
||||
GenEigsSolver(OpType* op, Index nev, Index ncv) :
|
||||
GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv)
|
||||
{}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // GEN_EIGS_SOLVER_H
|
|
@@ -0,0 +1,284 @@
|
|||
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef ARNOLDI_H
|
||||
#define ARNOLDI_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <cmath> // std::sqrt
|
||||
#include <stdexcept> // std::invalid_argument
|
||||
#include <sstream> // std::stringstream
|
||||
|
||||
#include "../MatOp/internal/ArnoldiOp.h"
|
||||
#include "../Util/TypeTraits.h"
|
||||
#include "../Util/SimpleRandom.h"
|
||||
#include "UpperHessenbergQR.h"
|
||||
#include "DoubleShiftQR.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
// Arnoldi factorization A * V = V * H + f * e'
|
||||
// A: n x n
|
||||
// V: n x k
|
||||
// H: k x k
|
||||
// f: n x 1
|
||||
// e: [0, ..., 0, 1]
|
||||
// V and H are allocated with dimension m, so the maximum value of k is m
|
||||
template <typename Scalar, typename ArnoldiOpType>
|
||||
class Arnoldi
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<Matrix> MapMat;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::Map<const Matrix> MapConstMat;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
|
||||
protected:
|
||||
// clang-format off
|
||||
ArnoldiOpType m_op; // Operators for the Arnoldi factorization
|
||||
|
||||
const Index m_n; // dimension of A
|
||||
const Index m_m; // maximum dimension of subspace V
|
||||
Index m_k; // current dimension of subspace V
|
||||
|
||||
Matrix m_fac_V; // V matrix in the Arnoldi factorization
|
||||
Matrix m_fac_H; // H matrix in the Arnoldi factorization
|
||||
Vector m_fac_f; // residual in the Arnoldi factorization
|
||||
Scalar m_beta; // ||f||, B-norm of f
|
||||
|
||||
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
|
||||
// ~= 1e-307 for the "double" type
|
||||
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
|
||||
// clang-format on
|
||||
|
||||
// Given orthonormal basis functions V, find a nonzero vector f such that V'Bf = 0
|
||||
// Assume that f has been properly allocated
|
||||
void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm)
|
||||
{
|
||||
using std::sqrt;
|
||||
|
||||
const Scalar thresh = m_eps * sqrt(Scalar(m_n));
|
||||
Vector Vf(V.cols());
|
||||
for (Index iter = 0; iter < 5; iter++)
|
||||
{
|
||||
// Randomly generate a new vector and orthogonalize it against V
|
||||
SimpleRandom<Scalar> rng(seed + 123 * iter);
|
||||
f.noalias() = rng.random_vec(m_n);
|
||||
// f <- f - V * V'Bf, so that f is orthogonal to V in B-norm
|
||||
m_op.trans_product(V, f, Vf);
|
||||
f.noalias() -= V * Vf;
|
||||
// fnorm <- ||f||
|
||||
fnorm = m_op.norm(f);
|
||||
|
||||
// If fnorm is too close to zero, we try a new random vector,
|
||||
// otherwise return the result
|
||||
if (fnorm >= thresh)
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
Arnoldi(const ArnoldiOpType& op, Index m) :
|
||||
m_op(op), m_n(op.rows()), m_m(m), m_k(0),
|
||||
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
|
||||
m_eps(Eigen::NumTraits<Scalar>::epsilon())
|
||||
{}
|
||||
|
||||
virtual ~Arnoldi() {}
|
||||
|
||||
// Const-reference to internal structures
|
||||
const Matrix& matrix_V() const { return m_fac_V; }
|
||||
const Matrix& matrix_H() const { return m_fac_H; }
|
||||
const Vector& vector_f() const { return m_fac_f; }
|
||||
Scalar f_norm() const { return m_beta; }
|
||||
Index subspace_dim() const { return m_k; }
|
||||
|
||||
// Initialize with an operator and an initial vector
|
||||
void init(MapConstVec& v0, Index& op_counter)
|
||||
{
|
||||
m_fac_V.resize(m_n, m_m);
|
||||
m_fac_H.resize(m_m, m_m);
|
||||
m_fac_f.resize(m_n);
|
||||
m_fac_H.setZero();
|
||||
|
||||
// Verify the initial vector
|
||||
const Scalar v0norm = m_op.norm(v0);
|
||||
if (v0norm < m_near_0)
|
||||
throw std::invalid_argument("initial residual vector cannot be zero");
|
||||
|
||||
// Points to the first column of V
|
||||
MapVec v(m_fac_V.data(), m_n);
|
||||
|
||||
// Normalize
|
||||
v.noalias() = v0 / v0norm;
|
||||
|
||||
// Compute H and f
|
||||
Vector w(m_n);
|
||||
m_op.perform_op(v.data(), w.data());
|
||||
op_counter++;
|
||||
|
||||
m_fac_H(0, 0) = m_op.inner_product(v, w);
|
||||
m_fac_f.noalias() = w - v * m_fac_H(0, 0);
|
||||
|
||||
// In some cases f is zero in exact arithmetic, but due to rounding errors
|
||||
// it may contain tiny fluctuations. When this happens, we force f to be zero
|
||||
if (m_fac_f.cwiseAbs().maxCoeff() < m_eps)
|
||||
{
|
||||
m_fac_f.setZero();
|
||||
m_beta = Scalar(0);
|
||||
}
|
||||
else
|
||||
{
|
||||
m_beta = m_op.norm(m_fac_f);
|
||||
}
|
||||
|
||||
// Indicate that this is a step-1 factorization
|
||||
m_k = 1;
|
||||
}
|
||||
|
||||
// Arnoldi factorization starting from step-k
|
||||
virtual void factorize_from(Index from_k, Index to_m, Index& op_counter)
|
||||
{
|
||||
using std::sqrt;
|
||||
|
||||
if (to_m <= from_k)
|
||||
return;
|
||||
|
||||
if (from_k > m_k)
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << "Arnoldi: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")";
|
||||
throw std::invalid_argument(msg.str());
|
||||
}
|
||||
|
||||
const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));
|
||||
|
||||
// Pre-allocate vectors
|
||||
Vector Vf(to_m);
|
||||
Vector w(m_n);
|
||||
|
||||
// Keep the upper-left k x k submatrix of H and set other elements to 0
|
||||
m_fac_H.rightCols(m_m - from_k).setZero();
|
||||
m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();
|
||||
|
||||
for (Index i = from_k; i <= to_m - 1; i++)
|
||||
{
|
||||
bool restart = false;
|
||||
// If beta = 0, then the next V is not full rank
|
||||
// We need to generate a new residual vector that is orthogonal
|
||||
// to the current V, which we call a restart
|
||||
if (m_beta < m_near_0)
|
||||
{
|
||||
MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns
|
||||
expand_basis(V, 2 * i, m_fac_f, m_beta);
|
||||
restart = true;
|
||||
}
|
||||
|
||||
// v <- f / ||f||
|
||||
m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column
|
||||
|
||||
// Note that H[i+1, i] equals the unrestarted beta
|
||||
m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
|
||||
|
||||
// w <- A * v, v = m_fac_V.col(i)
|
||||
m_op.perform_op(&m_fac_V(0, i), w.data());
|
||||
op_counter++;
|
||||
|
||||
const Index i1 = i + 1;
|
||||
// First i+1 columns of V
|
||||
MapConstMat Vs(m_fac_V.data(), m_n, i1);
|
||||
// h = m_fac_H(0:i, i)
|
||||
MapVec h(&m_fac_H(0, i), i1);
|
||||
// h <- V'Bw
|
||||
m_op.trans_product(Vs, w, h);
|
||||
|
||||
// f <- w - V * h
|
||||
m_fac_f.noalias() = w - Vs * h;
|
||||
m_beta = m_op.norm(m_fac_f);
|
||||
|
||||
if (m_beta > Scalar(0.717) * m_op.norm(h))
|
||||
continue;
|
||||
|
||||
// f/||f|| is going to be the next column of V, so we need to test
|
||||
// whether V'B(f/||f||) ~= 0
|
||||
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
|
||||
Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
|
||||
// If not, iteratively correct the residual
|
||||
int count = 0;
|
||||
while (count < 5 && ortho_err > m_eps * m_beta)
|
||||
{
|
||||
// There is an edge case: when beta=||f|| is close to zero, f mostly consists
|
||||
// of noises of rounding errors, so the test [ortho_err < eps * beta] is very
|
||||
// likely to fail. In particular, if beta=0, then the test is ensured to fail.
|
||||
// Hence when this happens, we force f to be zero, and then restart in the
|
||||
// next iteration.
|
||||
if (m_beta < beta_thresh)
|
||||
{
|
||||
m_fac_f.setZero();
|
||||
m_beta = Scalar(0);
|
||||
break;
|
||||
}
|
||||
|
||||
// f <- f - V * Vf
|
||||
m_fac_f.noalias() -= Vs * Vf.head(i1);
|
||||
// h <- h + Vf
|
||||
h.noalias() += Vf.head(i1);
|
||||
// beta <- ||f||
|
||||
m_beta = m_op.norm(m_fac_f);
|
||||
|
||||
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
|
||||
ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
||||
// Indicate that this is a step-m factorization
|
||||
m_k = to_m;
|
||||
}
|
||||
|
||||
// Apply H -> Q'HQ, where Q is from a double shift QR decomposition
|
||||
void compress_H(const DoubleShiftQR<Scalar>& decomp)
|
||||
{
|
||||
decomp.matrix_QtHQ(m_fac_H);
|
||||
m_k -= 2;
|
||||
}
|
||||
|
||||
// Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition
|
||||
void compress_H(const UpperHessenbergQR<Scalar>& decomp)
|
||||
{
|
||||
decomp.matrix_QtHQ(m_fac_H);
|
||||
m_k--;
|
||||
}
|
||||
|
||||
// Apply V -> VQ and compute the new f.
|
||||
// Should be called after compress_H(), since m_k is updated there.
|
||||
// Only need to update the first k+1 columns of V
|
||||
// The first (m - k + i) elements of the i-th column of Q are non-zero,
|
||||
// and the rest are zero
|
||||
void compress_V(const Matrix& Q)
|
||||
{
|
||||
Matrix Vs(m_n, m_k + 1);
|
||||
for (Index i = 0; i < m_k; i++)
|
||||
{
|
||||
const Index nnz = m_m - m_k + i + 1;
|
||||
MapConstVec q(&Q(0, i), nnz);
|
||||
Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q;
|
||||
}
|
||||
Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k);
|
||||
m_fac_V.leftCols(m_k + 1).noalias() = Vs;
|
||||
|
||||
Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1);
|
||||
m_fac_f.swap(fk);
|
||||
m_beta = m_op.norm(m_fac_f);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // ARNOLDI_H
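To make the factorization A * V = V * H + f * e' concrete, here is a small standalone sketch of the expansion step that factorize_from() performs, specialized to B = I (plain inner products) and written directly with Eigen; the matrix A, the subspace size m, and the single re-orthogonalization pass are illustrative simplifications of the code above.

#include <Eigen/Core>
#include <iostream>

int main()
{
    const int n = 50, m = 10;
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);

    Eigen::MatrixXd V(n, m), H = Eigen::MatrixXd::Zero(m, m);
    Eigen::VectorXd f = Eigen::VectorXd::Random(n);

    // Step 1: v1 = f / ||f||, H(0,0) = v1' A v1, f = A v1 - v1 H(0,0)
    V.col(0) = f / f.norm();
    Eigen::VectorXd w = A * V.col(0);
    H(0, 0) = V.col(0).dot(w);
    f = w - V.col(0) * H(0, 0);

    // Expansion steps i = 1, ..., m-1, as in factorize_from(1, m, ...)
    for (int i = 1; i < m; i++)
    {
        double beta = f.norm();
        V.col(i) = f / beta;             // v_{i+1} = f / ||f||
        H(i, i - 1) = beta;              // sub-diagonal entry of H

        w = A * V.col(i);                                        // w = A v
        Eigen::VectorXd h = V.leftCols(i + 1).transpose() * w;   // h = V' w
        H.col(i).head(i + 1) = h;
        f = w - V.leftCols(i + 1) * h;                           // new residual

        // One correction pass, mirroring the re-orthogonalization loop above
        Eigen::VectorXd Vf = V.leftCols(i + 1).transpose() * f;
        f -= V.leftCols(i + 1) * Vf;
        H.col(i).head(i + 1) += Vf;
    }

    // Check the Arnoldi relation A V = V H + f e_m'
    Eigen::MatrixXd resid = A * V - V * H;
    resid.col(m - 1) -= f;
    std::cout << "||A V - V H - f e'|| = " << resid.norm() << std::endl;
    return 0;
}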
|
|
@@ -0,0 +1,530 @@
|
|||
// Copyright (C) 2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef BK_LDLT_H
|
||||
#define BK_LDLT_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <vector>
|
||||
#include <stdexcept>
|
||||
|
||||
#include "../Util/CompInfo.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
// Bunch-Kaufman LDLT decomposition
|
||||
// References:
|
||||
// 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems.
|
||||
// Mathematics of computation, 31(137), 163-179.
|
||||
// 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4.
|
||||
// 3. Bunch-Parlett diagonal pivoting <http://oz.nthu.edu.tw/~d947207/Chap13_GE3.ppt>
|
||||
// 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers.
|
||||
// SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561.
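// A minimal usage sketch of this factorization class (the random symmetric
// matrix A, the right-hand side b, and the zero shift are illustrative
// assumptions, not values from this commit):
//
//     Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
//     A = (A + A.transpose()).eval();                      // make it symmetric
//     Eigen::VectorXd b = Eigen::VectorXd::Random(10);
//     Spectra::BKLDLT<double> ldlt(A, Eigen::Lower, 0.0);  // factorizes A - 0.0 * I
//     Eigen::VectorXd x = ldlt.solve(b);                   // solves (A - 0.0 * I) x = b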
|
||||
template <typename Scalar = double>
|
||||
class BKLDLT
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
|
||||
typedef Eigen::Matrix<Index, Eigen::Dynamic, 1> IntVector;
|
||||
typedef Eigen::Ref<Vector> GenericVector;
|
||||
typedef Eigen::Ref<Matrix> GenericMatrix;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
typedef const Eigen::Ref<const Vector> ConstGenericVector;
|
||||
|
||||
Index m_n;
|
||||
Vector m_data; // storage for a lower-triangular matrix
|
||||
std::vector<Scalar*> m_colptr; // pointers to columns
|
||||
IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5
|
||||
std::vector<std::pair<Index, Index> > m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)]
|
||||
|
||||
bool m_computed;
|
||||
int m_info;
|
||||
|
||||
// Access to elements
|
||||
// Pointer to the k-th column
|
||||
Scalar* col_pointer(Index k) { return m_colptr[k]; }
|
||||
// A[i, j] -> m_colptr[j][i - j], i >= j
|
||||
Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; }
|
||||
const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; }
|
||||
// A[i, i] -> m_colptr[i][0]
|
||||
Scalar& diag_coeff(Index i) { return m_colptr[i][0]; }
|
||||
const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; }
|
||||
|
||||
// Compute column pointers
|
||||
void compute_pointer()
|
||||
{
|
||||
m_colptr.clear();
|
||||
m_colptr.reserve(m_n);
|
||||
Scalar* head = m_data.data();
|
||||
|
||||
for (Index i = 0; i < m_n; i++)
|
||||
{
|
||||
m_colptr.push_back(head);
|
||||
head += (m_n - i);
|
||||
}
|
||||
}
|
||||
|
||||
// Copy mat - shift * I to m_data
|
||||
void copy_data(ConstGenericMatrix& mat, int uplo, const Scalar& shift)
|
||||
{
|
||||
if (uplo == Eigen::Lower)
|
||||
{
|
||||
for (Index j = 0; j < m_n; j++)
|
||||
{
|
||||
const Scalar* begin = &mat.coeffRef(j, j);
|
||||
const Index len = m_n - j;
|
||||
std::copy(begin, begin + len, col_pointer(j));
|
||||
diag_coeff(j) -= shift;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
Scalar* dest = m_data.data();
|
||||
for (Index i = 0; i < m_n; i++)
|
||||
{
|
||||
for (Index j = i; j < m_n; j++, dest++)
|
||||
{
|
||||
*dest = mat.coeff(i, j);
|
||||
}
|
||||
diag_coeff(i) -= shift;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute compressed permutations
|
||||
void compress_permutation()
|
||||
{
|
||||
for (Index i = 0; i < m_n; i++)
|
||||
{
|
||||
// Recover the permutation action
|
||||
const Index perm = (m_perm[i] >= 0) ? (m_perm[i]) : (-m_perm[i] - 1);
|
||||
if (perm != i)
|
||||
m_permc.push_back(std::make_pair(i, perm));
|
||||
}
|
||||
}
|
||||
|
||||
// Working on the A[k:end, k:end] submatrix
|
||||
// Exchange k <-> r
|
||||
// Assume r >= k
|
||||
void pivoting_1x1(Index k, Index r)
|
||||
{
|
||||
// No permutation
|
||||
if (k == r)
|
||||
{
|
||||
m_perm[k] = r;
|
||||
return;
|
||||
}
|
||||
|
||||
// A[k, k] <-> A[r, r]
|
||||
std::swap(diag_coeff(k), diag_coeff(r));
|
||||
|
||||
// A[(r+1):end, k] <-> A[(r+1):end, r]
|
||||
std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r));
|
||||
|
||||
// A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)]
|
||||
Scalar* src = &coeff(k + 1, k);
|
||||
for (Index j = k + 1; j < r; j++, src++)
|
||||
{
|
||||
std::swap(*src, coeff(r, j));
|
||||
}
|
||||
|
||||
m_perm[k] = r;
|
||||
}
|
||||
|
||||
// Working on the A[k:end, k:end] submatrix
|
||||
// Exchange [k+1, k] <-> [r, p]
|
||||
// Assume p >= k, r >= k+1
|
||||
void pivoting_2x2(Index k, Index r, Index p)
|
||||
{
|
||||
pivoting_1x1(k, p);
|
||||
pivoting_1x1(k + 1, r);
|
||||
|
||||
// A[k+1, k] <-> A[r, k]
|
||||
std::swap(coeff(k + 1, k), coeff(r, k));
|
||||
|
||||
// Use negative signs to indicate a 2x2 block
|
||||
// Also minus one to distinguish a negative zero from a positive zero
|
||||
m_perm[k] = -m_perm[k] - 1;
|
||||
m_perm[k + 1] = -m_perm[k + 1] - 1;
|
||||
}
|
||||
|
||||
// A[r1, c1:c2] <-> A[r2, c1:c2]
|
||||
// Assume r2 >= r1 > c2 >= c1
|
||||
void interchange_rows(Index r1, Index r2, Index c1, Index c2)
|
||||
{
|
||||
if (r1 == r2)
|
||||
return;
|
||||
|
||||
for (Index j = c1; j <= c2; j++)
|
||||
{
|
||||
std::swap(coeff(r1, j), coeff(r2, j));
|
||||
}
|
||||
}
|
||||
|
||||
// lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|}
|
||||
// Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix
|
||||
// r is the row index
|
||||
// Assume k < end
|
||||
Scalar find_lambda(Index k, Index& r)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
const Scalar* head = col_pointer(k); // => A[k, k]
|
||||
const Scalar* end = col_pointer(k + 1);
|
||||
// Start with r=k+1, lambda=A[k+1, k]
|
||||
r = k + 1;
|
||||
Scalar lambda = abs(head[1]);
|
||||
// Scan remaining elements
|
||||
for (const Scalar* ptr = head + 2; ptr < end; ptr++)
|
||||
{
|
||||
const Scalar abs_elem = abs(*ptr);
|
||||
if (lambda < abs_elem)
|
||||
{
|
||||
lambda = abs_elem;
|
||||
r = k + (ptr - head);
|
||||
}
|
||||
}
|
||||
|
||||
return lambda;
|
||||
}
|
||||
|
||||
// sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]}
|
||||
// Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix
|
||||
// p is the row index
|
||||
// Assume k < r < end
|
||||
Scalar find_sigma(Index k, Index r, Index& p)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
// First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda()
|
||||
// If r == end, we skip this search
|
||||
Scalar sigma = Scalar(-1);
|
||||
if (r < m_n - 1)
|
||||
sigma = find_lambda(r, p);
|
||||
|
||||
// Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1]
|
||||
for (Index j = k; j < r; j++)
|
||||
{
|
||||
const Scalar abs_elem = abs(coeff(r, j));
|
||||
if (sigma < abs_elem)
|
||||
{
|
||||
sigma = abs_elem;
|
||||
p = j;
|
||||
}
|
||||
}
|
||||
|
||||
return sigma;
|
||||
}
|
||||
|
||||
// Generate permutations and apply to A
|
||||
// Return true if the resulting pivoting is 1x1, and false if 2x2
|
||||
bool permutate_mat(Index k, const Scalar& alpha)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
Index r = k, p = k;
|
||||
const Scalar lambda = find_lambda(k, r);
|
||||
|
||||
// If lambda=0, no need to interchange
|
||||
if (lambda > Scalar(0))
|
||||
{
|
||||
const Scalar abs_akk = abs(diag_coeff(k));
|
||||
// If |A[k, k]| >= alpha * lambda, no need to interchange
|
||||
if (abs_akk < alpha * lambda)
|
||||
{
|
||||
const Scalar sigma = find_sigma(k, r, p);
|
||||
|
||||
// If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange
|
||||
if (sigma * abs_akk < alpha * lambda * lambda)
|
||||
{
|
||||
if (abs_akk >= alpha * sigma)
|
||||
{
|
||||
// Permutation on A
|
||||
pivoting_1x1(k, r);
|
||||
|
||||
// Permutation on L
|
||||
interchange_rows(k, r, 0, k - 1);
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
// There are two versions of permutation here
|
||||
// 1. A[k+1, k] <-> A[r, k]
|
||||
// 2. A[k+1, k] <-> A[r, p], where p >= k and r >= k+1
|
||||
//
|
||||
// Version 1 and 2 are used by Ref[1] and Ref[2], respectively
|
||||
|
||||
// Version 1 implementation
|
||||
p = k;
|
||||
|
||||
// Version 2 implementation
|
||||
// [r, p] and [p, r] are symmetric, but we need to make sure
|
||||
// p >= k and r >= k+1, so it is safe to always make r > p
|
||||
// One exception is when min{r,p} == k+1, in which case we make
|
||||
// r = k+1, so that only one permutation needs to be performed
|
||||
/* const Index rp_min = std::min(r, p);
|
||||
const Index rp_max = std::max(r, p);
|
||||
if(rp_min == k + 1)
|
||||
{
|
||||
r = rp_min; p = rp_max;
|
||||
} else {
|
||||
r = rp_max; p = rp_min;
|
||||
} */
|
||||
|
||||
// Right now we use Version 1 since it reduces the overhead of interchange
|
||||
|
||||
// Permutation on A
|
||||
pivoting_2x2(k, r, p);
|
||||
// Permutation on L
|
||||
interchange_rows(k, p, 0, k - 1);
|
||||
interchange_rows(k + 1, r, 0, k - 1);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// E = [e11, e12]
|
||||
// [e21, e22]
|
||||
// Overwrite E with inv(E)
|
||||
void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const
|
||||
{
|
||||
// inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta
|
||||
// [d21, d22]
|
||||
const Scalar delta = e11 * e22 - e21 * e21;
|
||||
std::swap(e11, e22);
|
||||
e11 /= delta;
|
||||
e22 /= delta;
|
||||
e21 = -e21 / delta;
|
||||
}
|
||||
|
||||
// Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE
|
||||
int gaussian_elimination_1x1(Index k)
|
||||
{
|
||||
// D = 1 / A[k, k]
|
||||
const Scalar akk = diag_coeff(k);
|
||||
// Return NUMERICAL_ISSUE if not invertible
|
||||
if (akk == Scalar(0))
|
||||
return NUMERICAL_ISSUE;
|
||||
|
||||
diag_coeff(k) = Scalar(1) / akk;
|
||||
|
||||
// B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k]
|
||||
Scalar* lptr = col_pointer(k) + 1;
|
||||
const Index ldim = m_n - k - 1;
|
||||
MapVec l(lptr, ldim);
|
||||
for (Index j = 0; j < ldim; j++)
|
||||
{
|
||||
MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j);
|
||||
}
|
||||
|
||||
// l /= A[k, k]
|
||||
l /= akk;
|
||||
|
||||
return SUCCESSFUL;
|
||||
}
|
||||
|
||||
// Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE
|
||||
int gaussian_elimination_2x2(Index k)
|
||||
{
|
||||
// D = inv(E)
|
||||
Scalar& e11 = diag_coeff(k);
|
||||
Scalar& e21 = coeff(k + 1, k);
|
||||
Scalar& e22 = diag_coeff(k + 1);
|
||||
// Return NUMERICAL_ISSUE if not invertible
|
||||
if (e11 * e22 - e21 * e21 == Scalar(0))
|
||||
return NUMERICAL_ISSUE;
|
||||
|
||||
inverse_inplace_2x2(e11, e21, e22);
|
||||
|
||||
// X = l * inv(E), l := L[(k+2):end, k:(k+1)]
|
||||
Scalar* l1ptr = &coeff(k + 2, k);
|
||||
Scalar* l2ptr = &coeff(k + 2, k + 1);
|
||||
const Index ldim = m_n - k - 2;
|
||||
MapVec l1(l1ptr, ldim), l2(l2ptr, ldim);
|
||||
|
||||
Eigen::Matrix<Scalar, Eigen::Dynamic, 2> X(ldim, 2);
|
||||
X.col(0).noalias() = l1 * e11 + l2 * e21;
|
||||
X.col(1).noalias() = l1 * e21 + l2 * e22;
|
||||
|
||||
// B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end]
|
||||
for (Index j = 0; j < ldim; j++)
|
||||
{
|
||||
MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]);
|
||||
}
|
||||
|
||||
// l = X
|
||||
l1.noalias() = X.col(0);
|
||||
l2.noalias() = X.col(1);
|
||||
|
||||
return SUCCESSFUL;
|
||||
}
|
||||
|
||||
public:
|
||||
BKLDLT() :
|
||||
m_n(0), m_computed(false), m_info(NOT_COMPUTED)
|
||||
{}
|
||||
|
||||
// Factorize mat - shift * I
|
||||
BKLDLT(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) :
|
||||
m_n(mat.rows()), m_computed(false), m_info(NOT_COMPUTED)
|
||||
{
|
||||
compute(mat, uplo, shift);
|
||||
}
|
||||
|
||||
void compute(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0))
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
m_n = mat.rows();
|
||||
if (m_n != mat.cols())
|
||||
throw std::invalid_argument("BKLDLT: matrix must be square");
|
||||
|
||||
m_perm.setLinSpaced(m_n, 0, m_n - 1);
|
||||
m_permc.clear();
|
||||
|
||||
// Copy data
|
||||
m_data.resize((m_n * (m_n + 1)) / 2);
|
||||
compute_pointer();
|
||||
copy_data(mat, uplo, shift);
|
||||
|
||||
const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0;
|
||||
Index k = 0;
|
||||
for (k = 0; k < m_n - 1; k++)
|
||||
{
|
||||
// 1. Interchange rows and columns of A, and save the result to m_perm
|
||||
bool is_1x1 = permutate_mat(k, alpha);
|
||||
|
||||
// 2. Gaussian elimination
|
||||
if (is_1x1)
|
||||
{
|
||||
m_info = gaussian_elimination_1x1(k);
|
||||
}
|
||||
else
|
||||
{
|
||||
m_info = gaussian_elimination_2x2(k);
|
||||
k++;
|
||||
}
|
||||
|
||||
// 3. Check status
|
||||
if (m_info != SUCCESSFUL)
|
||||
break;
|
||||
}
|
||||
// Invert the last 1x1 block if it exists
|
||||
if (k == m_n - 1)
|
||||
{
|
||||
const Scalar akk = diag_coeff(k);
|
||||
if (akk == Scalar(0))
|
||||
m_info = NUMERICAL_ISSUE;
|
||||
|
||||
diag_coeff(k) = Scalar(1) / diag_coeff(k);
|
||||
}
|
||||
|
||||
compress_permutation();
|
||||
|
||||
m_computed = true;
|
||||
}
|
||||
|
||||
// Solve Ax=b
|
||||
void solve_inplace(GenericVector b) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("BKLDLT: need to call compute() first");
|
||||
|
||||
// PAP' = LDL'
|
||||
// 1. b -> Pb
|
||||
Scalar* x = b.data();
|
||||
MapVec res(x, m_n);
|
||||
Index npermc = m_permc.size();
|
||||
for (Index i = 0; i < npermc; i++)
|
||||
{
|
||||
std::swap(x[m_permc[i].first], x[m_permc[i].second]);
|
||||
}
|
||||
|
||||
// 2. Lz = Pb
|
||||
// If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2
|
||||
const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2);
|
||||
for (Index i = 0; i <= end; i++)
|
||||
{
|
||||
const Index b1size = m_n - i - 1;
|
||||
const Index b2size = b1size - 1;
|
||||
if (m_perm[i] >= 0)
|
||||
{
|
||||
MapConstVec l(&coeff(i + 1, i), b1size);
|
||||
res.segment(i + 1, b1size).noalias() -= l * x[i];
|
||||
}
|
||||
else
|
||||
{
|
||||
MapConstVec l1(&coeff(i + 2, i), b2size);
|
||||
MapConstVec l2(&coeff(i + 2, i + 1), b2size);
|
||||
res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]);
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Dw = z
|
||||
for (Index i = 0; i < m_n; i++)
|
||||
{
|
||||
const Scalar e11 = diag_coeff(i);
|
||||
if (m_perm[i] >= 0)
|
||||
{
|
||||
x[i] *= e11;
|
||||
}
|
||||
else
|
||||
{
|
||||
const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1);
|
||||
const Scalar wi = x[i] * e11 + x[i + 1] * e21;
|
||||
x[i + 1] = x[i] * e21 + x[i + 1] * e22;
|
||||
x[i] = wi;
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
// 4. L'y = w
|
||||
// If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2
|
||||
Index i = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2);
|
||||
for (; i >= 0; i--)
|
||||
{
|
||||
const Index ldim = m_n - i - 1;
|
||||
MapConstVec l(&coeff(i + 1, i), ldim);
|
||||
x[i] -= res.segment(i + 1, ldim).dot(l);
|
||||
|
||||
if (m_perm[i] < 0)
|
||||
{
|
||||
MapConstVec l2(&coeff(i + 1, i - 1), ldim);
|
||||
x[i - 1] -= res.segment(i + 1, ldim).dot(l2);
|
||||
i--;
|
||||
}
|
||||
}
|
||||
|
||||
// 5. x = P'y
|
||||
for (Index i = npermc - 1; i >= 0; i--)
|
||||
{
|
||||
std::swap(x[m_permc[i].first], x[m_permc[i].second]);
|
||||
}
|
||||
}
|
||||
|
||||
Vector solve(ConstGenericVector& b) const
|
||||
{
|
||||
Vector res = b;
|
||||
solve_inplace(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
int info() const { return m_info; }
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // BK_LDLT_H
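A minimal usage sketch for the BKLDLT solver defined above (illustration only, not part of the commit; the include path and driver function are assumptions, while the constructor, info() and solve() calls follow the interface shown in this file):

#include <Eigen/Core>
#include <iostream>
// #include <Spectra/LinAlg/BKLDLT.h>   // assumed include path

int bkldlt_example()
{
    // Small symmetric (possibly indefinite) test matrix
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
    A = (A + A.transpose()).eval();
    Eigen::VectorXd b = Eigen::VectorXd::Ones(5);

    // Factorize A - shift * I from the lower triangle, here with shift = 0
    Spectra::BKLDLT<double> solver(A, Eigen::Lower, 0.0);
    if (solver.info() != Spectra::SUCCESSFUL)   // SUCCESSFUL is the status constant used in the class above
        return 1;

    // Solve (A - shift * I) x = b and report the residual
    Eigen::VectorXd x = solver.solve(b);
    std::cout << "residual = " << (A * x - b).norm() << std::endl;
    return 0;
}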
@ -0,0 +1,384 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef DOUBLE_SHIFT_QR_H
|
||||
#define DOUBLE_SHIFT_QR_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <vector> // std::vector
|
||||
#include <algorithm> // std::min, std::fill, std::copy
|
||||
#include <cmath> // std::abs, std::sqrt, std::pow
|
||||
#include <stdexcept> // std::invalid_argument, std::logic_error
|
||||
|
||||
#include "../Util/TypeTraits.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
template <typename Scalar = double>
|
||||
class DoubleShiftQR
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, 3, Eigen::Dynamic> Matrix3X;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Array<unsigned char, Eigen::Dynamic, 1> IntArray;
|
||||
|
||||
typedef Eigen::Ref<Matrix> GenericMatrix;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
Index m_n; // Dimension of the matrix
|
||||
Matrix m_mat_H; // A copy of the matrix to be factorized
|
||||
Scalar m_shift_s; // Shift constant
|
||||
Scalar m_shift_t; // Shift constant
|
||||
Matrix3X m_ref_u; // Householder reflectors
|
||||
IntArray m_ref_nr; // How many rows each reflector affects
|
||||
// 3 - A general reflector
|
||||
// 2 - A Givens rotation
|
||||
// 1 - An identity transformation
|
||||
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
|
||||
// ~= 1e-307 for the "double" type
|
||||
const Scalar m_eps; // the machine precision,
|
||||
// e.g. ~= 1e-16 for the "double" type
|
||||
const Scalar m_eps_rel;
|
||||
const Scalar m_eps_abs;
|
||||
bool m_computed; // Whether matrix has been factorized
|
||||
|
||||
void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
Scalar* u = &m_ref_u.coeffRef(0, ind);
|
||||
unsigned char* nr = m_ref_nr.data();
|
||||
// In the general case the reflector affects 3 rows
|
||||
nr[ind] = 3;
|
||||
Scalar x2x3 = Scalar(0);
|
||||
// If x3 is zero, decrease nr by 1
|
||||
if (abs(x3) < m_near_0)
|
||||
{
|
||||
// If x2 is also zero, nr will be 1, and we can exit this function
|
||||
if (abs(x2) < m_near_0)
|
||||
{
|
||||
nr[ind] = 1;
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
nr[ind] = 2;
|
||||
}
|
||||
x2x3 = abs(x2);
|
||||
}
|
||||
else
|
||||
{
|
||||
x2x3 = Eigen::numext::hypot(x2, x3);
|
||||
}
|
||||
|
||||
// x1' = x1 - rho * ||x||
|
||||
// rho = -sign(x1), if x1 == 0, we choose rho = 1
|
||||
Scalar x1_new = x1 - ((x1 <= 0) - (x1 > 0)) * Eigen::numext::hypot(x1, x2x3);
|
||||
Scalar x_norm = Eigen::numext::hypot(x1_new, x2x3);
|
||||
// Double check the norm of new x
|
||||
if (x_norm < m_near_0)
|
||||
{
|
||||
nr[ind] = 1;
|
||||
return;
|
||||
}
|
||||
u[0] = x1_new / x_norm;
|
||||
u[1] = x2 / x_norm;
|
||||
u[2] = x3 / x_norm;
|
||||
}
|
||||
|
||||
void compute_reflector(const Scalar* x, Index ind)
|
||||
{
|
||||
compute_reflector(x[0], x[1], x[2], ind);
|
||||
}
|
||||
|
||||
// Update the block X = H(il:iu, il:iu)
|
||||
void update_block(Index il, Index iu)
|
||||
{
|
||||
// Block size
|
||||
const Index bsize = iu - il + 1;
|
||||
|
||||
// If block size == 1, there is no need to apply reflectors
|
||||
if (bsize == 1)
|
||||
{
|
||||
m_ref_nr.coeffRef(il) = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
const Scalar x00 = m_mat_H.coeff(il, il),
|
||||
x01 = m_mat_H.coeff(il, il + 1),
|
||||
x10 = m_mat_H.coeff(il + 1, il),
|
||||
x11 = m_mat_H.coeff(il + 1, il + 1);
|
||||
// m00 = x00 * (x00 - s) + x01 * x10 + t
|
||||
const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t;
|
||||
// m10 = x10 * (x00 + x11 - s)
|
||||
const Scalar m10 = x10 * (x00 + x11 - m_shift_s);
|
||||
|
||||
// For block size == 2, do a Givens rotation on M = X * X - s * X + t * I
|
||||
if (bsize == 2)
|
||||
{
|
||||
// This causes nr=2
|
||||
compute_reflector(m00, m10, 0, il);
|
||||
// Apply the reflector to X
|
||||
apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il);
|
||||
apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il);
|
||||
|
||||
m_ref_nr.coeffRef(il + 1) = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
// For block size >=3, use the regular strategy
|
||||
// m20 = x21 * x10
|
||||
const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il);
|
||||
compute_reflector(m00, m10, m20, il);
|
||||
|
||||
// Apply the first reflector
|
||||
apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il);
|
||||
apply_XP(m_mat_H.block(0, il, il + std::min(bsize, Index(4)), 3), m_n, il);
|
||||
|
||||
// Calculate the following reflectors
|
||||
// If entering this loop, block size is at least 4.
|
||||
for (Index i = 1; i < bsize - 2; i++)
|
||||
{
|
||||
compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i);
|
||||
// Apply the reflector to X
|
||||
apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i);
|
||||
apply_XP(m_mat_H.block(0, il + i, il + std::min(bsize, Index(i + 4)), 3), m_n, il + i);
|
||||
}
|
||||
|
||||
// The last reflector
|
||||
// This causes nr=2
|
||||
compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1);
|
||||
// Apply the reflector to X
|
||||
apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1);
|
||||
apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1);
|
||||
|
||||
m_ref_nr.coeffRef(iu) = 1;
|
||||
}
|
||||
|
||||
// P = I - 2 * u * u' = P'
|
||||
// PX = X - 2 * u * (u'X)
|
||||
void apply_PX(GenericMatrix X, Index stride, Index u_ind) const
|
||||
{
|
||||
const Index nr = m_ref_nr.coeff(u_ind);
|
||||
if (nr == 1)
|
||||
return;
|
||||
|
||||
const Scalar u0 = m_ref_u.coeff(0, u_ind),
|
||||
u1 = m_ref_u.coeff(1, u_ind);
|
||||
const Scalar u0_2 = Scalar(2) * u0,
|
||||
u1_2 = Scalar(2) * u1;
|
||||
|
||||
const Index nrow = X.rows();
|
||||
const Index ncol = X.cols();
|
||||
|
||||
Scalar* xptr = X.data();
|
||||
if (nr == 2 || nrow == 2)
|
||||
{
|
||||
for (Index i = 0; i < ncol; i++, xptr += stride)
|
||||
{
|
||||
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1];
|
||||
xptr[0] -= tmp * u0;
|
||||
xptr[1] -= tmp * u1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
const Scalar u2 = m_ref_u.coeff(2, u_ind);
|
||||
const Scalar u2_2 = Scalar(2) * u2;
|
||||
for (Index i = 0; i < ncol; i++, xptr += stride)
|
||||
{
|
||||
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2];
|
||||
xptr[0] -= tmp * u0;
|
||||
xptr[1] -= tmp * u1;
|
||||
xptr[2] -= tmp * u2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// x is a pointer to a vector
|
||||
// Px = x - 2 * dot(x, u) * u
|
||||
void apply_PX(Scalar* x, Index u_ind) const
|
||||
{
|
||||
const Index nr = m_ref_nr.coeff(u_ind);
|
||||
if (nr == 1)
|
||||
return;
|
||||
|
||||
const Scalar u0 = m_ref_u.coeff(0, u_ind),
|
||||
u1 = m_ref_u.coeff(1, u_ind),
|
||||
u2 = m_ref_u.coeff(2, u_ind);
|
||||
|
||||
// When the reflector only contains two elements, u2 has been set to zero
|
||||
const bool nr_is_2 = (nr == 2);
|
||||
const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 0 : (x[2] * u2)));
|
||||
x[0] -= dot2 * u0;
|
||||
x[1] -= dot2 * u1;
|
||||
if (!nr_is_2)
|
||||
x[2] -= dot2 * u2;
|
||||
}
|
||||
|
||||
// XP = X - 2 * (X * u) * u'
|
||||
void apply_XP(GenericMatrix X, Index stride, Index u_ind) const
|
||||
{
|
||||
const Index nr = m_ref_nr.coeff(u_ind);
|
||||
if (nr == 1)
|
||||
return;
|
||||
|
||||
const Scalar u0 = m_ref_u.coeff(0, u_ind),
|
||||
u1 = m_ref_u.coeff(1, u_ind);
|
||||
const Scalar u0_2 = Scalar(2) * u0,
|
||||
u1_2 = Scalar(2) * u1;
|
||||
|
||||
const int nrow = X.rows();
|
||||
const int ncol = X.cols();
|
||||
Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1)
|
||||
|
||||
if (nr == 2 || ncol == 2)
|
||||
{
|
||||
// tmp = 2 * u0 * X0 + 2 * u1 * X1
|
||||
// X0 => X0 - u0 * tmp
|
||||
// X1 => X1 - u1 * tmp
|
||||
for (Index i = 0; i < nrow; i++)
|
||||
{
|
||||
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i];
|
||||
X0[i] -= tmp * u0;
|
||||
X1[i] -= tmp * u1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
Scalar* X2 = X1 + stride; // X2 => X.col(2)
|
||||
const Scalar u2 = m_ref_u.coeff(2, u_ind);
|
||||
const Scalar u2_2 = Scalar(2) * u2;
|
||||
for (Index i = 0; i < nrow; i++)
|
||||
{
|
||||
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i];
|
||||
X0[i] -= tmp * u0;
|
||||
X1[i] -= tmp * u1;
|
||||
X2[i] -= tmp * u2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
DoubleShiftQR(Index size) :
|
||||
m_n(size),
|
||||
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
|
||||
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
|
||||
m_eps_rel(m_eps),
|
||||
m_eps_abs(m_near_0 * (m_n / m_eps)),
|
||||
m_computed(false)
|
||||
{}
|
||||
|
||||
DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) :
|
||||
m_n(mat.rows()),
|
||||
m_mat_H(m_n, m_n),
|
||||
m_shift_s(s),
|
||||
m_shift_t(t),
|
||||
m_ref_u(3, m_n),
|
||||
m_ref_nr(m_n),
|
||||
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
|
||||
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
|
||||
m_eps_rel(m_eps),
|
||||
m_eps_abs(m_near_0 * (m_n / m_eps)),
|
||||
m_computed(false)
|
||||
{
|
||||
compute(mat, s, t);
|
||||
}
|
||||
|
||||
void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
m_n = mat.rows();
|
||||
if (m_n != mat.cols())
|
||||
throw std::invalid_argument("DoubleShiftQR: matrix must be square");
|
||||
|
||||
m_mat_H.resize(m_n, m_n);
|
||||
m_shift_s = s;
|
||||
m_shift_t = t;
|
||||
m_ref_u.resize(3, m_n);
|
||||
m_ref_nr.resize(m_n);
|
||||
|
||||
// Make a copy of mat
|
||||
std::copy(mat.data(), mat.data() + mat.size(), m_mat_H.data());
|
||||
|
||||
// Obtain the indices of zero elements in the subdiagonal,
|
||||
// so that H can be divided into several blocks
|
||||
std::vector<int> zero_ind;
|
||||
zero_ind.reserve(m_n - 1);
|
||||
zero_ind.push_back(0);
|
||||
Scalar* Hii = m_mat_H.data();
|
||||
for (Index i = 0; i < m_n - 2; i++, Hii += (m_n + 1))
|
||||
{
|
||||
// Hii[1] => m_mat_H(i + 1, i)
|
||||
const Scalar h = abs(Hii[1]);
|
||||
if (h <= 0 || h <= m_eps_rel * (abs(Hii[0]) + abs(Hii[m_n + 1])))
|
||||
{
|
||||
Hii[1] = 0;
|
||||
zero_ind.push_back(i + 1);
|
||||
}
|
||||
// Make sure m_mat_H is upper Hessenberg
|
||||
// Zero the elements below m_mat_H(i + 1, i)
|
||||
std::fill(Hii + 2, Hii + m_n - i, Scalar(0));
|
||||
}
|
||||
zero_ind.push_back(m_n);
|
||||
|
||||
for (std::vector<int>::size_type i = 0; i < zero_ind.size() - 1; i++)
|
||||
{
|
||||
const Index start = zero_ind[i];
|
||||
const Index end = zero_ind[i + 1] - 1;
|
||||
// Compute reflectors and update each block
|
||||
update_block(start, end);
|
||||
}
|
||||
|
||||
m_computed = true;
|
||||
}
|
||||
|
||||
void matrix_QtHQ(Matrix& dest) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("DoubleShiftQR: need to call compute() first");
|
||||
|
||||
dest.noalias() = m_mat_H;
|
||||
}
|
||||
|
||||
// Q = P0 * P1 * ...
|
||||
// Q'y = P_{n-2} * ... * P1 * P0 * y
|
||||
void apply_QtY(Vector& y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("DoubleShiftQR: need to call compute() first");
|
||||
|
||||
Scalar* y_ptr = y.data();
|
||||
const Index n1 = m_n - 1;
|
||||
for (Index i = 0; i < n1; i++, y_ptr++)
|
||||
{
|
||||
apply_PX(y_ptr, i);
|
||||
}
|
||||
}
|
||||
|
||||
// Q = P0 * P1 * ...
|
||||
// YQ = Y * P0 * P1 * ...
|
||||
void apply_YQ(GenericMatrix Y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("DoubleShiftQR: need to call compute() first");
|
||||
|
||||
const Index nrow = Y.rows();
|
||||
const Index n2 = m_n - 2;
|
||||
for (Index i = 0; i < n2; i++)
|
||||
{
|
||||
apply_XP(Y.block(0, i, nrow, 3), nrow, i);
|
||||
}
|
||||
apply_XP(Y.block(0, n2, nrow, 2), nrow, n2);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // DOUBLE_SHIFT_QR_H
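A short sketch of how the DoubleShiftQR class above can be driven (illustration only; the include path is an assumption): reduce a matrix to upper Hessenberg form with Eigen, take the Francis shifts s = trace and t = determinant of the trailing 2x2 block, and apply one implicit double-shift sweep H -> Q'HQ.

#include <Eigen/Core>
#include <Eigen/Eigenvalues>   // Eigen::HessenbergDecomposition
// #include <Spectra/LinAlg/DoubleShiftQR.h>   // assumed include path

int double_shift_qr_example()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 6);
    Eigen::HessenbergDecomposition<Eigen::MatrixXd> hess(A);
    Eigen::MatrixXd H = hess.matrixH();

    // Francis shifts from the trailing 2x2 block
    const double s = H(4, 4) + H(5, 5);
    const double t = H(4, 4) * H(5, 5) - H(4, 5) * H(5, 4);

    // One implicit double-shift QR sweep: overwrite Hnext with Q' H Q
    Spectra::DoubleShiftQR<double> decomp(H, s, t);
    Eigen::MatrixXd Hnext(6, 6);
    decomp.matrix_QtHQ(Hnext);
    return 0;
}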
@ -0,0 +1,167 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef LANCZOS_H
|
||||
#define LANCZOS_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <cmath> // std::sqrt
|
||||
#include <stdexcept> // std::invalid_argument
|
||||
#include <sstream> // std::stringstream
|
||||
|
||||
#include "Arnoldi.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
// Lanczos factorization A * V = V * H + f * e'
|
||||
// A: n x n
|
||||
// V: n x k
|
||||
// H: k x k
|
||||
// f: n x 1
|
||||
// e: [0, ..., 0, 1]
|
||||
// V and H are allocated of dimension m, so the maximum value of k is m
|
||||
template <typename Scalar, typename ArnoldiOpType>
|
||||
class Lanczos : public Arnoldi<Scalar, ArnoldiOpType>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<Matrix> MapMat;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::Map<const Matrix> MapConstMat;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_op;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_n;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_m;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_k;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_V;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_H;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_fac_f;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_beta;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_near_0;
|
||||
using Arnoldi<Scalar, ArnoldiOpType>::m_eps;
|
||||
|
||||
public:
|
||||
Lanczos(const ArnoldiOpType& op, Index m) :
|
||||
Arnoldi<Scalar, ArnoldiOpType>(op, m)
|
||||
{}
|
||||
|
||||
// Lanczos factorization starting from step-k
|
||||
void factorize_from(Index from_k, Index to_m, Index& op_counter)
|
||||
{
|
||||
using std::sqrt;
|
||||
|
||||
if (to_m <= from_k)
|
||||
return;
|
||||
|
||||
if (from_k > m_k)
|
||||
{
|
||||
std::stringstream msg;
|
||||
msg << "Lanczos: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")";
|
||||
throw std::invalid_argument(msg.str());
|
||||
}
|
||||
|
||||
const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));
|
||||
|
||||
// Pre-allocate vectors
|
||||
Vector Vf(to_m);
|
||||
Vector w(m_n);
|
||||
|
||||
// Keep the upper-left k x k submatrix of H and set other elements to 0
|
||||
m_fac_H.rightCols(m_m - from_k).setZero();
|
||||
m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();
|
||||
|
||||
for (Index i = from_k; i <= to_m - 1; i++)
|
||||
{
|
||||
bool restart = false;
|
||||
// If beta = 0, then the next V is not full rank
|
||||
// We need to generate a new residual vector that is orthogonal
|
||||
// to the current V, which we call a restart
|
||||
if (m_beta < m_near_0)
|
||||
{
|
||||
MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns
|
||||
this->expand_basis(V, 2 * i, m_fac_f, m_beta);
|
||||
restart = true;
|
||||
}
|
||||
|
||||
// v <- f / ||f||
|
||||
MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column
|
||||
v.noalias() = m_fac_f / m_beta;
|
||||
|
||||
// Note that H[i+1, i] equals the unrestarted beta
|
||||
m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
|
||||
|
||||
// w <- A * v
|
||||
m_op.perform_op(v.data(), w.data());
|
||||
op_counter++;
|
||||
|
||||
// H[i+1, i+1] = <v, w> = v'Bw
|
||||
m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry
|
||||
m_fac_H(i, i) = m_op.inner_product(v, w);
|
||||
|
||||
// f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1}
|
||||
// If restarting, we know that H[i+1, i] = 0
|
||||
if (restart)
|
||||
m_fac_f.noalias() = w - m_fac_H(i, i) * v;
|
||||
else
|
||||
m_fac_f.noalias() = w - m_fac_H(i, i - 1) * m_fac_V.col(i - 1) - m_fac_H(i, i) * v;
|
||||
|
||||
m_beta = m_op.norm(m_fac_f);
|
||||
|
||||
// f/||f|| is going to be the next column of V, so we need to test
|
||||
// whether V'B(f/||f||) ~= 0
|
||||
const Index i1 = i + 1;
|
||||
MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns
|
||||
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
|
||||
Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
|
||||
// If not, iteratively correct the residual
|
||||
int count = 0;
|
||||
while (count < 5 && ortho_err > m_eps * m_beta)
|
||||
{
|
||||
// There is an edge case: when beta=||f|| is close to zero, f mostly consists
|
||||
// of noises of rounding errors, so the test [ortho_err < eps * beta] is very
|
||||
// likely to fail. In particular, if beta=0, then the test is ensured to fail.
|
||||
// Hence when this happens, we force f to be zero, and then restart in the
|
||||
// next iteration.
|
||||
if (m_beta < beta_thresh)
|
||||
{
|
||||
m_fac_f.setZero();
|
||||
m_beta = Scalar(0);
|
||||
break;
|
||||
}
|
||||
|
||||
// f <- f - V * Vf
|
||||
m_fac_f.noalias() -= Vs * Vf.head(i1);
|
||||
// h <- h + Vf
|
||||
m_fac_H(i - 1, i) += Vf[i - 1];
|
||||
m_fac_H(i, i - 1) = m_fac_H(i - 1, i);
|
||||
m_fac_H(i, i) += Vf[i];
|
||||
// beta <- ||f||
|
||||
m_beta = m_op.norm(m_fac_f);
|
||||
|
||||
m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
|
||||
ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
||||
// Indicate that this is a step-m factorization
|
||||
m_k = to_m;
|
||||
}
|
||||
|
||||
// Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition
|
||||
void compress_H(const TridiagQR<Scalar>& decomp)
|
||||
{
|
||||
decomp.matrix_QtHQ(m_fac_H);
|
||||
m_k--;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // LANCZOS_H
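The Lanczos class above builds the factorization A * V = V * H + f * e' through the ArnoldiOp abstraction. As a plain-Eigen illustration of the same three-term recurrence (standard inner product, no B operator, no restart or re-orthogonalization handling), a minimal sketch:

#include <Eigen/Core>

// Illustrative Lanczos recurrence for a symmetric A; assumes 0 < k <= A.rows()
// and that no breakdown (beta == 0) occurs.
void lanczos_sketch(const Eigen::MatrixXd& A, int k)
{
    const Eigen::Index n = A.rows();
    Eigen::MatrixXd V(n, k);
    Eigen::MatrixXd H = Eigen::MatrixXd::Zero(k, k);

    Eigen::VectorXd f = Eigen::VectorXd::Random(n);
    double beta = f.norm();

    for (int i = 0; i < k; i++)
    {
        V.col(i) = f / beta;                      // v_{i+1} = f / ||f||
        if (i > 0)
            H(i, i - 1) = H(i - 1, i) = beta;     // sub/super-diagonal entry
        Eigen::VectorXd w = A * V.col(i);         // w = A * v
        H(i, i) = V.col(i).dot(w);                // alpha = v' A v
        f = w - H(i, i) * V.col(i);               // new residual
        if (i > 0)
            f -= beta * V.col(i - 1);
        beta = f.norm();
    }
    // Up to rounding, A * V == V * H + f * e_k' now holds, with H tridiagonal.
}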
@ -0,0 +1,219 @@
// The code was adapted from Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
|
||||
//
|
||||
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
|
||||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef TRIDIAG_EIGEN_H
|
||||
#define TRIDIAG_EIGEN_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Jacobi>
|
||||
#include <stdexcept>
|
||||
|
||||
#include "../Util/TypeTraits.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
template <typename Scalar = double>
|
||||
class TridiagEigen
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
// For convenience in adapting the tridiagonal_qr_step() function
|
||||
typedef Scalar RealScalar;
|
||||
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
typedef Eigen::Ref<Matrix> GenericMatrix;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
Index m_n;
|
||||
Vector m_main_diag; // Main diagonal elements of the matrix
|
||||
Vector m_sub_diag; // Sub-diagonal elements of the matrix
|
||||
Matrix m_evecs; // To store eigenvectors
|
||||
|
||||
bool m_computed;
|
||||
const Scalar m_near_0; // a very small value, ~= 1e-307 for the "double" type
|
||||
|
||||
// Adapted from Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
|
||||
static void tridiagonal_qr_step(RealScalar* diag,
|
||||
RealScalar* subdiag, Index start,
|
||||
Index end, Scalar* matrixQ,
|
||||
Index n)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5);
|
||||
RealScalar e = subdiag[end - 1];
|
||||
// Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
|
||||
// underflow thus leading to inf/NaN values when using the following commented code:
|
||||
// RealScalar e2 = numext::abs2(subdiag[end-1]);
|
||||
// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
|
||||
// This explains the following, somewhat more complicated, version:
|
||||
RealScalar mu = diag[end];
|
||||
if (td == Scalar(0))
|
||||
mu -= abs(e);
|
||||
else
|
||||
{
|
||||
RealScalar e2 = Eigen::numext::abs2(subdiag[end - 1]);
|
||||
RealScalar h = Eigen::numext::hypot(td, e);
|
||||
if (e2 == RealScalar(0))
|
||||
mu -= (e / (td + (td > RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
|
||||
else
|
||||
mu -= e2 / (td + (td > RealScalar(0) ? h : -h));
|
||||
}
|
||||
|
||||
RealScalar x = diag[start] - mu;
|
||||
RealScalar z = subdiag[start];
|
||||
Eigen::Map<Matrix> q(matrixQ, n, n);
|
||||
for (Index k = start; k < end; ++k)
|
||||
{
|
||||
Eigen::JacobiRotation<RealScalar> rot;
|
||||
rot.makeGivens(x, z);
|
||||
|
||||
const RealScalar s = rot.s();
|
||||
const RealScalar c = rot.c();
|
||||
|
||||
// do T = G' T G
|
||||
RealScalar sdk = s * diag[k] + c * subdiag[k];
|
||||
RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1];
|
||||
|
||||
diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]);
|
||||
diag[k + 1] = s * sdk + c * dkp1;
|
||||
subdiag[k] = c * sdk - s * dkp1;
|
||||
|
||||
if (k > start)
|
||||
subdiag[k - 1] = c * subdiag[k - 1] - s * z;
|
||||
|
||||
x = subdiag[k];
|
||||
|
||||
if (k < end - 1)
|
||||
{
|
||||
z = -s * subdiag[k + 1];
|
||||
subdiag[k + 1] = c * subdiag[k + 1];
|
||||
}
|
||||
|
||||
// apply the givens rotation to the unit matrix Q = Q * G
|
||||
if (matrixQ)
|
||||
q.applyOnTheRight(k, k + 1, rot);
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
TridiagEigen() :
|
||||
m_n(0), m_computed(false),
|
||||
m_near_0(TypeTraits<Scalar>::min() * Scalar(10))
|
||||
{}
|
||||
|
||||
TridiagEigen(ConstGenericMatrix& mat) :
|
||||
m_n(mat.rows()), m_computed(false),
|
||||
m_near_0(TypeTraits<Scalar>::min() * Scalar(10))
|
||||
{
|
||||
compute(mat);
|
||||
}
|
||||
|
||||
void compute(ConstGenericMatrix& mat)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
m_n = mat.rows();
|
||||
if (m_n != mat.cols())
|
||||
throw std::invalid_argument("TridiagEigen: matrix must be square");
|
||||
|
||||
m_main_diag.resize(m_n);
|
||||
m_sub_diag.resize(m_n - 1);
|
||||
m_evecs.resize(m_n, m_n);
|
||||
m_evecs.setIdentity();
|
||||
|
||||
// Scale matrix to improve stability
|
||||
const Scalar scale = std::max(mat.diagonal().cwiseAbs().maxCoeff(),
|
||||
mat.diagonal(-1).cwiseAbs().maxCoeff());
|
||||
// If scale=0, mat is a zero matrix, so we can stop early
|
||||
if (scale < m_near_0)
|
||||
{
|
||||
// m_main_diag contains eigenvalues
|
||||
m_main_diag.setZero();
|
||||
// m_evecs has been set identity
|
||||
// m_evecs.setIdentity();
|
||||
m_computed = true;
|
||||
return;
|
||||
}
|
||||
m_main_diag.noalias() = mat.diagonal() / scale;
|
||||
m_sub_diag.noalias() = mat.diagonal(-1) / scale;
|
||||
|
||||
Scalar* diag = m_main_diag.data();
|
||||
Scalar* subdiag = m_sub_diag.data();
|
||||
|
||||
Index end = m_n - 1;
|
||||
Index start = 0;
|
||||
Index iter = 0; // total number of iterations
|
||||
int info = 0; // 0 for success, 1 for failure
|
||||
|
||||
const Scalar considerAsZero = TypeTraits<Scalar>::min();
|
||||
const Scalar precision = Scalar(2) * Eigen::NumTraits<Scalar>::epsilon();
|
||||
|
||||
while (end > 0)
|
||||
{
|
||||
for (Index i = start; i < end; i++)
|
||||
if (abs(subdiag[i]) <= considerAsZero ||
|
||||
abs(subdiag[i]) <= (abs(diag[i]) + abs(diag[i + 1])) * precision)
|
||||
subdiag[i] = 0;
|
||||
|
||||
// find the largest unreduced block
|
||||
while (end > 0 && subdiag[end - 1] == Scalar(0))
|
||||
end--;
|
||||
|
||||
if (end <= 0)
|
||||
break;
|
||||
|
||||
// if we spent too many iterations, we give up
|
||||
iter++;
|
||||
if (iter > 30 * m_n)
|
||||
{
|
||||
info = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
start = end - 1;
|
||||
while (start > 0 && subdiag[start - 1] != Scalar(0))
|
||||
start--;
|
||||
|
||||
tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n);
|
||||
}
|
||||
|
||||
if (info > 0)
|
||||
throw std::runtime_error("TridiagEigen: eigen decomposition failed");
|
||||
|
||||
// Scale eigenvalues back
|
||||
m_main_diag *= scale;
|
||||
|
||||
m_computed = true;
|
||||
}
|
||||
|
||||
const Vector& eigenvalues() const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("TridiagEigen: need to call compute() first");
|
||||
|
||||
// After calling compute(), main_diag will contain the eigenvalues.
|
||||
return m_main_diag;
|
||||
}
|
||||
|
||||
const Matrix& eigenvectors() const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("TridiagEigen: need to call compute() first");
|
||||
|
||||
return m_evecs;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // TRIDIAG_EIGEN_H
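A minimal usage sketch for the TridiagEigen solver above (illustration only; the include path is assumed, the member calls follow the interface shown in this file):

#include <Eigen/Core>
#include <iostream>
// #include <Spectra/LinAlg/TridiagEigen.h>   // assumed include path

int tridiag_eigen_example()
{
    const int n = 6;
    // Symmetric tridiagonal matrix: 2 on the diagonal, -1 on the sub/super-diagonal
    Eigen::MatrixXd T = Eigen::MatrixXd::Zero(n, n);
    T.diagonal().setConstant(2.0);
    T.diagonal(1).setConstant(-1.0);
    T.diagonal(-1).setConstant(-1.0);

    Spectra::TridiagEigen<double> eig(T);   // reads the main and sub-diagonal
    std::cout << "eigenvalues: " << eig.eigenvalues().transpose() << std::endl;

    // Residual check: T * V - V * diag(lambda) should be near zero
    Eigen::MatrixXd resid = T * eig.eigenvectors()
                            - eig.eigenvectors() * eig.eigenvalues().asDiagonal();
    std::cout << "residual norm = " << resid.norm() << std::endl;
    return 0;
}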
@ -0,0 +1,319 @@
// The code was adapted from Eigen/src/Eigenvalues/EigenSolver.h
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
|
||||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef UPPER_HESSENBERG_EIGEN_H
|
||||
#define UPPER_HESSENBERG_EIGEN_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Eigenvalues>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
template <typename Scalar = double>
|
||||
class UpperHessenbergEigen
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
typedef Eigen::Ref<Matrix> GenericMatrix;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
typedef std::complex<Scalar> Complex;
|
||||
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
|
||||
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
|
||||
|
||||
Index m_n; // Size of the matrix
|
||||
Eigen::RealSchur<Matrix> m_realSchur; // Schur decomposition solver
|
||||
Matrix m_matT; // Schur T matrix
|
||||
Matrix m_eivec; // Storing eigenvectors
|
||||
ComplexVector m_eivalues; // Eigenvalues
|
||||
|
||||
bool m_computed;
|
||||
|
||||
void doComputeEigenvectors()
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
const Index size = m_eivec.cols();
|
||||
const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
|
||||
|
||||
// inefficient! this is already computed in RealSchur
|
||||
Scalar norm(0);
|
||||
for (Index j = 0; j < size; ++j)
|
||||
{
|
||||
norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum();
|
||||
}
|
||||
|
||||
// Backsubstitute to find vectors of upper triangular form
|
||||
if (norm == Scalar(0))
|
||||
return;
|
||||
|
||||
for (Index n = size - 1; n >= 0; n--)
|
||||
{
|
||||
Scalar p = m_eivalues.coeff(n).real();
|
||||
Scalar q = m_eivalues.coeff(n).imag();
|
||||
|
||||
// Scalar vector
|
||||
if (q == Scalar(0))
|
||||
{
|
||||
Scalar lastr(0), lastw(0);
|
||||
Index l = n;
|
||||
|
||||
m_matT.coeffRef(n, n) = Scalar(1);
|
||||
for (Index i = n - 1; i >= 0; i--)
|
||||
{
|
||||
Scalar w = m_matT.coeff(i, i) - p;
|
||||
Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1));
|
||||
|
||||
if (m_eivalues.coeff(i).imag() < Scalar(0))
|
||||
{
|
||||
lastw = w;
|
||||
lastr = r;
|
||||
}
|
||||
else
|
||||
{
|
||||
l = i;
|
||||
if (m_eivalues.coeff(i).imag() == Scalar(0))
|
||||
{
|
||||
if (w != Scalar(0))
|
||||
m_matT.coeffRef(i, n) = -r / w;
|
||||
else
|
||||
m_matT.coeffRef(i, n) = -r / (eps * norm);
|
||||
}
|
||||
else // Solve real equations
|
||||
{
|
||||
Scalar x = m_matT.coeff(i, i + 1);
|
||||
Scalar y = m_matT.coeff(i + 1, i);
|
||||
Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
|
||||
Scalar t = (x * lastr - lastw * r) / denom;
|
||||
m_matT.coeffRef(i, n) = t;
|
||||
if (abs(x) > abs(lastw))
|
||||
m_matT.coeffRef(i + 1, n) = (-r - w * t) / x;
|
||||
else
|
||||
m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw;
|
||||
}
|
||||
|
||||
// Overflow control
|
||||
Scalar t = abs(m_matT.coeff(i, n));
|
||||
if ((eps * t) * t > Scalar(1))
|
||||
m_matT.col(n).tail(size - i) /= t;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (q < Scalar(0) && n > 0)
|
||||
{ // Complex vector
|
||||
Scalar lastra(0), lastsa(0), lastw(0);
|
||||
Index l = n - 1;
|
||||
|
||||
// Last vector component imaginary so matrix is triangular
|
||||
if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n)))
|
||||
{
|
||||
m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1);
|
||||
m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / m_matT.coeff(n, n - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q);
|
||||
m_matT.coeffRef(n - 1, n - 1) = Eigen::numext::real(cc);
|
||||
m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc);
|
||||
}
|
||||
m_matT.coeffRef(n, n - 1) = Scalar(0);
|
||||
m_matT.coeffRef(n, n) = Scalar(1);
|
||||
for (Index i = n - 2; i >= 0; i--)
|
||||
{
|
||||
Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1));
|
||||
Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1));
|
||||
Scalar w = m_matT.coeff(i, i) - p;
|
||||
|
||||
if (m_eivalues.coeff(i).imag() < Scalar(0))
|
||||
{
|
||||
lastw = w;
|
||||
lastra = ra;
|
||||
lastsa = sa;
|
||||
}
|
||||
else
|
||||
{
|
||||
l = i;
|
||||
if (m_eivalues.coeff(i).imag() == Scalar(0))
|
||||
{
|
||||
Complex cc = Complex(-ra, -sa) / Complex(w, q);
|
||||
m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc);
|
||||
m_matT.coeffRef(i, n) = Eigen::numext::imag(cc);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Solve complex equations
|
||||
Scalar x = m_matT.coeff(i, i + 1);
|
||||
Scalar y = m_matT.coeff(i + 1, i);
|
||||
Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
|
||||
Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
|
||||
if ((vr == Scalar(0)) && (vi == Scalar(0)))
|
||||
vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
|
||||
|
||||
Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi);
|
||||
m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc);
|
||||
m_matT.coeffRef(i, n) = Eigen::numext::imag(cc);
|
||||
if (abs(x) > (abs(lastw) + abs(q)))
|
||||
{
|
||||
m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x;
|
||||
m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x;
|
||||
}
|
||||
else
|
||||
{
|
||||
cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q);
|
||||
m_matT.coeffRef(i + 1, n - 1) = Eigen::numext::real(cc);
|
||||
m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc);
|
||||
}
|
||||
}
|
||||
|
||||
// Overflow control
|
||||
Scalar t = std::max(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n)));
|
||||
if ((eps * t) * t > Scalar(1))
|
||||
m_matT.block(i, n - 1, size - i, 2) /= t;
|
||||
}
|
||||
}
|
||||
|
||||
// We handled a pair of complex conjugate eigenvalues, so need to skip them both
|
||||
n--;
|
||||
}
|
||||
}
|
||||
|
||||
// Back transformation to get eigenvectors of original matrix
|
||||
Vector m_tmp(size);
|
||||
for (Index j = size - 1; j >= 0; j--)
|
||||
{
|
||||
m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1);
|
||||
m_eivec.col(j) = m_tmp;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
UpperHessenbergEigen() :
|
||||
m_n(0), m_computed(false)
|
||||
{}
|
||||
|
||||
UpperHessenbergEigen(ConstGenericMatrix& mat) :
|
||||
m_n(mat.rows()), m_computed(false)
|
||||
{
|
||||
compute(mat);
|
||||
}
|
||||
|
||||
void compute(ConstGenericMatrix& mat)
|
||||
{
|
||||
using std::abs;
|
||||
using std::sqrt;
|
||||
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("UpperHessenbergEigen: matrix must be square");
|
||||
|
||||
m_n = mat.rows();
|
||||
// Scale matrix prior to the Schur decomposition
|
||||
const Scalar scale = mat.cwiseAbs().maxCoeff();
|
||||
|
||||
// Reduce to real Schur form
|
||||
Matrix Q = Matrix::Identity(m_n, m_n);
|
||||
m_realSchur.computeFromHessenberg(mat / scale, Q, true);
|
||||
if (m_realSchur.info() != Eigen::Success)
|
||||
throw std::runtime_error("UpperHessenbergEigen: eigen decomposition failed");
|
||||
|
||||
m_matT = m_realSchur.matrixT();
|
||||
m_eivec = m_realSchur.matrixU();
|
||||
|
||||
// Compute eigenvalues from matT
|
||||
m_eivalues.resize(m_n);
|
||||
Index i = 0;
|
||||
while (i < m_n)
|
||||
{
|
||||
// Real eigenvalue
|
||||
if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0))
|
||||
{
|
||||
m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
|
||||
++i;
|
||||
}
|
||||
else // Complex eigenvalues
|
||||
{
|
||||
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1));
|
||||
Scalar z;
|
||||
// Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
|
||||
// without overflow
|
||||
{
|
||||
Scalar t0 = m_matT.coeff(i + 1, i);
|
||||
Scalar t1 = m_matT.coeff(i, i + 1);
|
||||
Scalar maxval = std::max(abs(p), std::max(abs(t0), abs(t1)));
|
||||
t0 /= maxval;
|
||||
t1 /= maxval;
|
||||
Scalar p0 = p / maxval;
|
||||
z = maxval * sqrt(abs(p0 * p0 + t0 * t1));
|
||||
}
|
||||
m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z);
|
||||
m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z);
|
||||
i += 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Compute eigenvectors
|
||||
doComputeEigenvectors();
|
||||
|
||||
// Scale eigenvalues back
|
||||
m_eivalues *= scale;
|
||||
|
||||
m_computed = true;
|
||||
}
|
||||
|
||||
const ComplexVector& eigenvalues() const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergEigen: need to call compute() first");
|
||||
|
||||
return m_eivalues;
|
||||
}
|
||||
|
||||
ComplexMatrix eigenvectors()
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergEigen: need to call compute() first");
|
||||
|
||||
Index n = m_eivec.cols();
|
||||
ComplexMatrix matV(n, n);
|
||||
for (Index j = 0; j < n; ++j)
|
||||
{
|
||||
// imaginary part of real eigenvalue is already set to exact zero
|
||||
if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n)
|
||||
{
|
||||
// we have a real eigenvalue
|
||||
matV.col(j) = m_eivec.col(j).template cast<Complex>();
|
||||
matV.col(j).normalize();
|
||||
}
|
||||
else
|
||||
{
|
||||
// we have a pair of complex eigenvalues
|
||||
for (Index i = 0; i < n; ++i)
|
||||
{
|
||||
matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1));
|
||||
matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1));
|
||||
}
|
||||
matV.col(j).normalize();
|
||||
matV.col(j + 1).normalize();
|
||||
++j;
|
||||
}
|
||||
}
|
||||
|
||||
return matV;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // UPPER_HESSENBERG_EIGEN_H
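And a matching sketch for UpperHessenbergEigen (illustration only; include path assumed): reduce a general matrix to Hessenberg form with Eigen, then recover its (possibly complex) eigenvalues and eigenvectors with the class above.

#include <Eigen/Core>
#include <Eigen/Eigenvalues>   // Eigen::HessenbergDecomposition
#include <complex>
#include <iostream>
// #include <Spectra/LinAlg/UpperHessenbergEigen.h>   // assumed include path

int upper_hessenberg_eigen_example()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 6);
    Eigen::HessenbergDecomposition<Eigen::MatrixXd> hess(A);
    Eigen::MatrixXd H = hess.matrixH();

    Spectra::UpperHessenbergEigen<double> eig(H);
    std::cout << "eigenvalues:\n" << eig.eigenvalues() << std::endl;

    // Residual check: H * V - V * diag(lambda) should be near zero
    Eigen::MatrixXcd V = eig.eigenvectors();
    Eigen::MatrixXcd resid = H.cast<std::complex<double> >() * V
                             - V * eig.eigenvalues().asDiagonal();
    std::cout << "residual norm = " << resid.norm() << std::endl;
    return 0;
}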
@ -0,0 +1,670 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef UPPER_HESSENBERG_QR_H
|
||||
#define UPPER_HESSENBERG_QR_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <cmath> // std::sqrt
|
||||
#include <algorithm> // std::fill, std::copy
|
||||
#include <stdexcept> // std::logic_error
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \defgroup Internals Internal Classes
|
||||
///
|
||||
/// Classes for internal use. May be useful to developers.
|
||||
///
|
||||
|
||||
///
|
||||
/// \ingroup Internals
|
||||
/// @{
|
||||
///
|
||||
|
||||
///
|
||||
/// \defgroup LinearAlgebra Linear Algebra
|
||||
///
|
||||
/// A number of classes for linear algebra operations.
|
||||
///
|
||||
|
||||
///
|
||||
/// \ingroup LinearAlgebra
|
||||
///
|
||||
/// Perform the QR decomposition of an upper Hessenberg matrix.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
///
|
||||
template <typename Scalar = double>
|
||||
class UpperHessenbergQR
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Matrix<Scalar, 1, Eigen::Dynamic> RowVector;
|
||||
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
|
||||
|
||||
typedef Eigen::Ref<Matrix> GenericMatrix;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
Matrix m_mat_T;
|
||||
|
||||
protected:
|
||||
Index m_n;
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
// Q = G1 * G2 * ... * G_{n-1}
|
||||
Scalar m_shift;
|
||||
Array m_rot_cos;
|
||||
Array m_rot_sin;
|
||||
bool m_computed;
|
||||
|
||||
// Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r
|
||||
// If both x and y are zero, set c = 1 and s = 0
|
||||
// We must implement it in a numerically stable way
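// Worked example (for illustration): x = 3, y = 4 takes the second branch
// (xabs < yabs), so ratio = 3/4, common = sqrt(1 + 9/16) = 1.25, giving
// r = 4 * 1.25 = 5, s = -1 / 1.25 = -0.8 and c = 3 / 5 = 0.6; the rotation
// G = [c s; -s c] then satisfies G' * [3, 4]' = [5, 0]'.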
|
||||
static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s)
|
||||
{
|
||||
using std::sqrt;
|
||||
|
||||
const Scalar xsign = (x > Scalar(0)) - (x < Scalar(0));
|
||||
const Scalar ysign = (y > Scalar(0)) - (y < Scalar(0));
|
||||
const Scalar xabs = x * xsign;
|
||||
const Scalar yabs = y * ysign;
|
||||
if (xabs > yabs)
|
||||
{
|
||||
// In this case xabs != 0
|
||||
const Scalar ratio = yabs / xabs; // so that 0 <= ratio < 1
|
||||
const Scalar common = sqrt(Scalar(1) + ratio * ratio);
|
||||
c = xsign / common;
|
||||
r = xabs * common;
|
||||
s = -y / r;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (yabs == Scalar(0))
|
||||
{
|
||||
r = Scalar(0);
|
||||
c = Scalar(1);
|
||||
s = Scalar(0);
|
||||
return;
|
||||
}
|
||||
const Scalar ratio = xabs / yabs; // so that 0 <= ratio <= 1
|
||||
const Scalar common = sqrt(Scalar(1) + ratio * ratio);
|
||||
s = -ysign / common;
|
||||
r = yabs * common;
|
||||
c = x / r;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to preallocate memory. Computation can
|
||||
/// be performed later by calling the compute() method.
|
||||
///
|
||||
UpperHessenbergQR(Index size) :
|
||||
m_n(size),
|
||||
m_rot_cos(m_n - 1),
|
||||
m_rot_sin(m_n - 1),
|
||||
m_computed(false)
|
||||
{}
|
||||
|
||||
///
|
||||
/// Constructor to create an object that performs and stores the
|
||||
/// QR decomposition of an upper Hessenberg matrix `mat`, with an
|
||||
/// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix
|
||||
/// `mat`, and \f$s\f$ is the shift.
|
||||
///
|
||||
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
/// Only the upper triangular and the lower subdiagonal parts of
|
||||
/// the matrix are used.
|
||||
///
|
||||
UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) :
|
||||
m_n(mat.rows()),
|
||||
m_shift(shift),
|
||||
m_rot_cos(m_n - 1),
|
||||
m_rot_sin(m_n - 1),
|
||||
m_computed(false)
|
||||
{
|
||||
compute(mat, shift);
|
||||
}
|
||||
|
||||
///
|
||||
/// Virtual destructor.
|
||||
///
|
||||
virtual ~UpperHessenbergQR() {}
|
||||
|
||||
///
|
||||
/// Conduct the QR factorization of an upper Hessenberg matrix with
|
||||
/// an optional shift.
|
||||
///
|
||||
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
/// Only the upper triangular and the lower subdiagonal parts of
|
||||
/// the matrix are used.
|
||||
///
|
||||
virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0))
|
||||
{
|
||||
m_n = mat.rows();
|
||||
if (m_n != mat.cols())
|
||||
throw std::invalid_argument("UpperHessenbergQR: matrix must be square");
|
||||
|
||||
m_shift = shift;
|
||||
m_mat_T.resize(m_n, m_n);
|
||||
m_rot_cos.resize(m_n - 1);
|
||||
m_rot_sin.resize(m_n - 1);
|
||||
|
||||
// Make a copy of mat - s * I
|
||||
std::copy(mat.data(), mat.data() + mat.size(), m_mat_T.data());
|
||||
m_mat_T.diagonal().array() -= m_shift;
|
||||
|
||||
Scalar xi, xj, r, c, s;
|
||||
Scalar *Tii, *ptr;
|
||||
const Index n1 = m_n - 1;
|
||||
for (Index i = 0; i < n1; i++)
|
||||
{
|
||||
Tii = &m_mat_T.coeffRef(i, i);
|
||||
|
||||
// Make sure mat_T is upper Hessenberg
|
||||
// Zero the elements below mat_T(i + 1, i)
|
||||
std::fill(Tii + 2, Tii + m_n - i, Scalar(0));
|
||||
|
||||
xi = Tii[0]; // mat_T(i, i)
|
||||
xj = Tii[1]; // mat_T(i + 1, i)
|
||||
compute_rotation(xi, xj, r, c, s);
|
||||
m_rot_cos[i] = c;
|
||||
m_rot_sin[i] = s;
|
||||
|
||||
// For a complete QR decomposition,
|
||||
// we first obtain the rotation matrix
|
||||
// G = [ cos sin]
|
||||
// [-sin cos]
|
||||
// and then do T[i:(i + 1), i:(n - 1)] = G' * T[i:(i + 1), i:(n - 1)]
|
||||
|
||||
// Gt << c, -s, s, c;
|
||||
// m_mat_T.block(i, i, 2, m_n - i) = Gt * m_mat_T.block(i, i, 2, m_n - i);
|
||||
Tii[0] = r; // m_mat_T(i, i) => r
|
||||
Tii[1] = 0; // m_mat_T(i + 1, i) => 0
|
||||
ptr = Tii + m_n; // m_mat_T(i, k), k = i+1, i+2, ..., n-1
|
||||
for (Index j = i + 1; j < m_n; j++, ptr += m_n)
|
||||
{
|
||||
Scalar tmp = ptr[0];
|
||||
ptr[0] = c * tmp - s * ptr[1];
|
||||
ptr[1] = s * tmp + c * ptr[1];
|
||||
}
|
||||
|
||||
// If we do not need to calculate the R matrix, then
|
||||
// only the cos and sin sequences are required.
|
||||
// In such case we only update T[i + 1, (i + 1):(n - 1)]
|
||||
// m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) *= c;
|
||||
// m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) += s * mat_T.block(i, i + 1, 1, m_n - i - 1);
|
||||
}
|
||||
|
||||
m_computed = true;
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the \f$R\f$ matrix in the QR decomposition, which is an
|
||||
/// upper triangular matrix.
|
||||
///
|
||||
/// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on
|
||||
/// the template parameter `Scalar` defined.
|
||||
///
|
||||
virtual Matrix matrix_R() const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
return m_mat_T;
|
||||
}
|
||||
|
||||
///
|
||||
/// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`,
|
||||
/// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix.
|
||||
///
|
||||
/// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`,
|
||||
/// depending on the template parameter `Scalar` defined.
|
||||
///
|
||||
virtual void matrix_QtHQ(Matrix& dest) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
// Make a copy of the R matrix
|
||||
dest.resize(m_n, m_n);
|
||||
std::copy(m_mat_T.data(), m_mat_T.data() + m_mat_T.size(), dest.data());
|
||||
|
||||
// Compute the RQ matrix
|
||||
const Index n1 = m_n - 1;
|
||||
for (Index i = 0; i < n1; i++)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
// RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
Scalar *Yi, *Yi1;
|
||||
Yi = &dest.coeffRef(0, i);
|
||||
Yi1 = Yi + m_n; // RQ(0, i + 1)
|
||||
const Index i2 = i + 2;
|
||||
for (Index j = 0; j < i2; j++)
|
||||
{
|
||||
const Scalar tmp = Yi[j];
|
||||
Yi[j] = c * tmp - s * Yi1[j];
|
||||
Yi1[j] = s * tmp + c * Yi1[j];
|
||||
}
|
||||
|
||||
/* Vector dest = RQ.block(0, i, i + 2, 1);
|
||||
dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1);
|
||||
dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */
|
||||
}
|
||||
|
||||
// Add the shift to the diagonal
|
||||
dest.diagonal().array() += m_shift;
|
||||
}
|
||||
|
||||
///
|
||||
/// Apply the \f$Q\f$ matrix to a vector \f$y\f$.
|
||||
///
|
||||
/// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$.
|
||||
///
|
||||
/// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on
|
||||
/// the template parameter `Scalar` defined.
|
||||
///
|
||||
// Y -> QY = G1 * G2 * ... * Y
|
||||
void apply_QY(Vector& Y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
for (Index i = m_n - 2; i >= 0; i--)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
// Y[i:(i + 1)] = Gi * Y[i:(i + 1)]
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
const Scalar tmp = Y[i];
|
||||
Y[i] = c * tmp + s * Y[i + 1];
|
||||
Y[i + 1] = -s * tmp + c * Y[i + 1];
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Apply the \f$Q\f$ matrix to a vector \f$y\f$.
|
||||
///
|
||||
/// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$.
|
||||
///
|
||||
/// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on
|
||||
/// the template parameter `Scalar` defined.
|
||||
///
|
||||
// Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y
|
||||
void apply_QtY(Vector& Y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
const Index n1 = m_n - 1;
|
||||
for (Index i = 0; i < n1; i++)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
// Y[i:(i + 1)] = Gi' * Y[i:(i + 1)]
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
const Scalar tmp = Y[i];
|
||||
Y[i] = c * tmp - s * Y[i + 1];
|
||||
Y[i + 1] = s * tmp + c * Y[i + 1];
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
|
||||
///
|
||||
/// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$.
|
||||
///
|
||||
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
// Y -> QY = G1 * G2 * ... * Y
|
||||
void apply_QY(GenericMatrix Y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
RowVector Yi(Y.cols()), Yi1(Y.cols());
|
||||
for (Index i = m_n - 2; i >= 0; i--)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
// Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ]
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
Yi.noalias() = Y.row(i);
|
||||
Yi1.noalias() = Y.row(i + 1);
|
||||
Y.row(i) = c * Yi + s * Yi1;
|
||||
Y.row(i + 1) = -s * Yi + c * Yi1;
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
|
||||
///
|
||||
/// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$.
|
||||
///
|
||||
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
// Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y
|
||||
void apply_QtY(GenericMatrix Y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
RowVector Yi(Y.cols()), Yi1(Y.cols());
|
||||
const Index n1 = m_n - 1;
|
||||
for (Index i = 0; i < n1; i++)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
// Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ]
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
Yi.noalias() = Y.row(i);
|
||||
Yi1.noalias() = Y.row(i + 1);
|
||||
Y.row(i) = c * Yi - s * Yi1;
|
||||
Y.row(i + 1) = s * Yi + c * Yi1;
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
|
||||
///
|
||||
/// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$.
|
||||
///
|
||||
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
// Y -> YQ = Y * G1 * G2 * ...
|
||||
void apply_YQ(GenericMatrix Y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
/*Vector Yi(Y.rows());
|
||||
for(Index i = 0; i < m_n - 1; i++)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
// Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
Yi.noalias() = Y.col(i);
|
||||
Y.col(i) = c * Yi - s * Y.col(i + 1);
|
||||
Y.col(i + 1) = s * Yi + c * Y.col(i + 1);
|
||||
}*/
|
||||
Scalar *Y_col_i, *Y_col_i1;
|
||||
const Index n1 = m_n - 1;
|
||||
const Index nrow = Y.rows();
|
||||
for (Index i = 0; i < n1; i++)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
|
||||
Y_col_i = &Y.coeffRef(0, i);
|
||||
Y_col_i1 = &Y.coeffRef(0, i + 1);
|
||||
for (Index j = 0; j < nrow; j++)
|
||||
{
|
||||
Scalar tmp = Y_col_i[j];
|
||||
Y_col_i[j] = c * tmp - s * Y_col_i1[j];
|
||||
Y_col_i1[j] = s * tmp + c * Y_col_i1[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$.
|
||||
///
|
||||
/// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$.
|
||||
///
|
||||
/// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
// Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1'
|
||||
void apply_YQt(GenericMatrix Y) const
|
||||
{
|
||||
if (!m_computed)
|
||||
throw std::logic_error("UpperHessenbergQR: need to call compute() first");
|
||||
|
||||
Vector Yi(Y.rows());
|
||||
for (Index i = m_n - 2; i >= 0; i--)
|
||||
{
|
||||
const Scalar c = m_rot_cos.coeff(i);
|
||||
const Scalar s = m_rot_sin.coeff(i);
|
||||
// Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi'
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
Yi.noalias() = Y.col(i);
|
||||
Y.col(i) = c * Yi + s * Y.col(i + 1);
|
||||
Y.col(i + 1) = -s * Yi + c * Y.col(i + 1);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// \ingroup LinearAlgebra
|
||||
///
|
||||
/// Perform the QR decomposition of a tridiagonal matrix, a special
|
||||
/// case of upper Hessenberg matrices.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
///
|
||||
template <typename Scalar = double>
|
||||
class TridiagQR : public UpperHessenbergQR<Scalar>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
typedef typename Matrix::Index Index;
|
||||
|
||||
Vector m_T_diag; // diagonal elements of T
|
||||
Vector m_T_lsub; // lower subdiagonal of T
|
||||
Vector m_T_usub; // upper subdiagonal of T
|
||||
Vector m_T_usub2; // 2nd upper subdiagonal of T
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to preallocate memory. Computation can
|
||||
/// be performed later by calling the compute() method.
|
||||
///
|
||||
TridiagQR(Index size) :
|
||||
UpperHessenbergQR<Scalar>(size)
|
||||
{}
|
||||
|
||||
///
|
||||
/// Constructor to create an object that performs and stores the
|
||||
/// QR decomposition of an upper Hessenberg matrix `mat`, with an
|
||||
/// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix
|
||||
/// `mat`, and \f$s\f$ is the shift.
|
||||
///
|
||||
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
/// Only the main diagonal and the subdiagonal parts of
/// the matrix are used.
|
||||
///
|
||||
TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) :
|
||||
UpperHessenbergQR<Scalar>(mat.rows())
|
||||
{
|
||||
this->compute(mat, shift);
|
||||
}
|
||||
|
||||
///
|
||||
/// Conduct the QR factorization of a tridiagonal matrix with an
|
||||
/// optional shift.
|
||||
///
|
||||
/// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g.
|
||||
/// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
/// Only the main diagonal and the subdiagonal parts of
/// the matrix are used.
|
||||
///
|
||||
void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0))
|
||||
{
|
||||
this->m_n = mat.rows();
|
||||
if (this->m_n != mat.cols())
|
||||
throw std::invalid_argument("TridiagQR: matrix must be square");
|
||||
|
||||
this->m_shift = shift;
|
||||
m_T_diag.resize(this->m_n);
|
||||
m_T_lsub.resize(this->m_n - 1);
|
||||
m_T_usub.resize(this->m_n - 1);
|
||||
m_T_usub2.resize(this->m_n - 2);
|
||||
this->m_rot_cos.resize(this->m_n - 1);
|
||||
this->m_rot_sin.resize(this->m_n - 1);
|
||||
|
||||
m_T_diag.array() = mat.diagonal().array() - this->m_shift;
|
||||
m_T_lsub.noalias() = mat.diagonal(-1);
|
||||
m_T_usub.noalias() = m_T_lsub;
|
||||
|
||||
// A number of pointers to avoid repeated address calculation
|
||||
Scalar *c = this->m_rot_cos.data(), // pointer to the cosine vector
|
||||
*s = this->m_rot_sin.data(), // pointer to the sine vector
|
||||
r;
|
||||
const Index n1 = this->m_n - 1;
|
||||
for (Index i = 0; i < n1; i++)
|
||||
{
|
||||
// diag[i] == T[i, i]
|
||||
// lsub[i] == T[i + 1, i]
|
||||
// r = sqrt(T[i, i]^2 + T[i + 1, i]^2)
|
||||
// c = T[i, i] / r, s = -T[i + 1, i] / r
|
||||
this->compute_rotation(m_T_diag.coeff(i), m_T_lsub.coeff(i), r, *c, *s);
|
||||
|
||||
// For a complete QR decomposition,
|
||||
// we first obtain the rotation matrix
|
||||
// G = [ cos sin]
|
||||
// [-sin cos]
|
||||
// and then do T[i:(i + 1), i:(i + 2)] = G' * T[i:(i + 1), i:(i + 2)]
|
||||
|
||||
// Update T[i, i] and T[i + 1, i]
|
||||
// The updated value of T[i, i] is known to be r
|
||||
// The updated value of T[i + 1, i] is known to be 0
|
||||
m_T_diag.coeffRef(i) = r;
|
||||
m_T_lsub.coeffRef(i) = Scalar(0);
|
||||
// Update T[i, i + 1] and T[i + 1, i + 1]
|
||||
// usub[i] == T[i, i + 1]
|
||||
// diag[i + 1] == T[i + 1, i + 1]
|
||||
const Scalar tmp = m_T_usub.coeff(i);
|
||||
m_T_usub.coeffRef(i) = (*c) * tmp - (*s) * m_T_diag.coeff(i + 1);
|
||||
m_T_diag.coeffRef(i + 1) = (*s) * tmp + (*c) * m_T_diag.coeff(i + 1);
|
||||
// Update T[i, i + 2] and T[i + 1, i + 2]
|
||||
// usub2[i] == T[i, i + 2]
|
||||
// usub[i + 1] == T[i + 1, i + 2]
|
||||
if (i < n1 - 1)
|
||||
{
|
||||
m_T_usub2.coeffRef(i) = -(*s) * m_T_usub.coeff(i + 1);
|
||||
m_T_usub.coeffRef(i + 1) *= (*c);
|
||||
}
|
||||
|
||||
c++;
|
||||
s++;
|
||||
|
||||
// If we do not need to calculate the R matrix, then
|
||||
// only the cos and sin sequences are required.
|
||||
// In such case we only update T[i + 1, (i + 1):(i + 2)]
|
||||
// T[i + 1, i + 1] = c * T[i + 1, i + 1] + s * T[i, i + 1];
|
||||
// T[i + 1, i + 2] *= c;
|
||||
}
|
||||
|
||||
this->m_computed = true;
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the \f$R\f$ matrix in the QR decomposition, which is an
|
||||
/// upper triangular matrix.
|
||||
///
|
||||
/// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on
|
||||
/// the template parameter `Scalar` defined.
|
||||
///
|
||||
Matrix matrix_R() const
|
||||
{
|
||||
if (!this->m_computed)
|
||||
throw std::logic_error("TridiagQR: need to call compute() first");
|
||||
|
||||
Matrix R = Matrix::Zero(this->m_n, this->m_n);
|
||||
R.diagonal().noalias() = m_T_diag;
|
||||
R.diagonal(1).noalias() = m_T_usub;
|
||||
R.diagonal(2).noalias() = m_T_usub2;
|
||||
|
||||
return R;
|
||||
}
|
||||
|
||||
///
|
||||
/// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`,
|
||||
/// and \f$s\f$ is the shift. The result is a tridiagonal matrix.
|
||||
///
|
||||
/// \param dest The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`,
/// depending on the template parameter `Scalar` defined.
|
||||
///
|
||||
void matrix_QtHQ(Matrix& dest) const
|
||||
{
|
||||
if (!this->m_computed)
|
||||
throw std::logic_error("TridiagQR: need to call compute() first");
|
||||
|
||||
// Make a copy of the R matrix
|
||||
dest.resize(this->m_n, this->m_n);
|
||||
dest.setZero();
|
||||
dest.diagonal().noalias() = m_T_diag;
|
||||
// The upper diagonal refers to m_T_usub
|
||||
// The 2nd upper subdiagonal will be zero in RQ
|
||||
|
||||
// Compute the RQ matrix
|
||||
// [m11 m12] points to RQ[i:(i+1), i:(i+1)]
|
||||
// [0 m22]
|
||||
//
|
||||
// Gi = [ cos[i] sin[i]]
|
||||
// [-sin[i] cos[i]]
|
||||
const Index n1 = this->m_n - 1;
|
||||
for (Index i = 0; i < n1; i++)
|
||||
{
|
||||
const Scalar c = this->m_rot_cos.coeff(i);
|
||||
const Scalar s = this->m_rot_sin.coeff(i);
|
||||
const Scalar m11 = dest.coeff(i, i),
|
||||
m12 = m_T_usub.coeff(i),
|
||||
m22 = m_T_diag.coeff(i + 1);
|
||||
|
||||
// Update the diagonal and the lower subdiagonal of dest
|
||||
dest.coeffRef(i, i) = c * m11 - s * m12;
|
||||
dest.coeffRef(i + 1, i) = -s * m22;
|
||||
dest.coeffRef(i + 1, i + 1) = c * m22;
|
||||
}
|
||||
|
||||
// Copy the lower subdiagonal to upper subdiagonal
|
||||
dest.diagonal(1).noalias() = dest.diagonal(-1);
|
||||
|
||||
// Add the shift to the diagonal
|
||||
dest.diagonal().array() += this->m_shift;
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// @}
|
||||
///
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // UPPER_HESSENBERG_QR_H
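// Illustration (not part of the library): a minimal check of the TridiagQR
// interface defined above. The include path and the test matrix are assumptions;
// adjust the path to wherever this vendored header actually lives.
#include <Eigen/Core>
#include <iostream>
#include <Spectra/LinAlg/UpperHessenbergQR.h>  // assumed upstream path

int main()
{
    const int n = 6;
    // Random symmetric tridiagonal matrix T
    Eigen::MatrixXd T = Eigen::MatrixXd::Zero(n, n);
    T.diagonal() = Eigen::VectorXd::Random(n);
    T.diagonal(1) = Eigen::VectorXd::Random(n - 1);
    T.diagonal(-1) = T.diagonal(1);

    const double shift = 0.5;
    Spectra::TridiagQR<double> decomp(T, shift);  // factorizes T - shift * I = Q * R

    // Recover Q by applying the stored Givens rotations to the identity: I -> I * Q
    Eigen::MatrixXd Q = Eigen::MatrixXd::Identity(n, n);
    decomp.apply_YQ(Q);

    const Eigen::MatrixXd R = decomp.matrix_R();
    std::cout << "||QR - (T - sI)|| = "
              << (Q * R - (T - shift * Eigen::MatrixXd::Identity(n, n))).norm() << std::endl;

    // matrix_QtHQ() returns RQ + shift * I, which should equal Q'TQ
    Eigen::MatrixXd QtTQ;
    decomp.matrix_QtHQ(QtTQ);
    std::cout << "||Q'TQ - (RQ + sI)|| = "
              << (Q.transpose() * T * Q - QtTQ).norm() << std::endl;
    return 0;
}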
@ -0,0 +1,108 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef DENSE_CHOLESKY_H
|
||||
#define DENSE_CHOLESKY_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Cholesky>
|
||||
#include <stdexcept>
|
||||
#include "../Util/CompInfo.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the operations related to Cholesky decomposition on a
|
||||
/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
|
||||
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
|
||||
/// in the Cholesky decomposition mode.
|
||||
///
|
||||
template <typename Scalar, int Uplo = Eigen::Lower>
|
||||
class DenseCholesky
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Matrix> MapConstMat;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
const Index m_n;
|
||||
Eigen::LLT<Matrix, Uplo> m_decomp;
|
||||
int m_info; // status of the decomposition
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** matrix object, whose type can be
|
||||
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
|
||||
/// `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
DenseCholesky(ConstGenericMatrix& mat) :
|
||||
m_n(mat.rows()), m_info(NOT_COMPUTED)
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("DenseCholesky: matrix must be square");
|
||||
|
||||
m_decomp.compute(mat);
|
||||
m_info = (m_decomp.info() == Eigen::Success) ?
|
||||
SUCCESSFUL :
|
||||
NUMERICAL_ISSUE;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Returns the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Returns the status of the computation.
|
||||
/// The full list of enumeration values can be found in \ref Enumerations.
|
||||
///
|
||||
int info() const { return m_info; }
|
||||
|
||||
///
|
||||
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(L) * x_in
|
||||
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_decomp.matrixL().solve(x);
|
||||
}
|
||||
|
||||
///
|
||||
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(L') * x_in
|
||||
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_decomp.matrixU().solve(x);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // DENSE_CHOLESKY_H
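// Illustration (not part of the library): how DenseCholesky is typically paired
// with DenseSymMatProd in the Cholesky mode of SymGEigsSolver to solve
// A x = lambda B x. Include paths follow the upstream Spectra layout and the
// matrices are made up; both are assumptions.
#include <Eigen/Core>
#include <iostream>
#include <Spectra/SymGEigsSolver.h>
#include <Spectra/MatOp/DenseSymMatProd.h>
#include <Spectra/MatOp/DenseCholesky.h>

int main()
{
    const int n = 100;
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd A = M + M.transpose();                                             // symmetric A
    Eigen::MatrixXd B = M * M.transpose() + double(n) * Eigen::MatrixXd::Identity(n, n);  // positive definite B

    Spectra::DenseSymMatProd<double> op(A);
    Spectra::DenseCholesky<double> Bop(B);

    // Request the 3 largest generalized eigenvalues with a Krylov dimension of 6
    Spectra::SymGEigsSolver<double, Spectra::LARGEST_ALGE,
                            Spectra::DenseSymMatProd<double>,
                            Spectra::DenseCholesky<double>,
                            Spectra::GEIGS_CHOLESKY>
        geigs(&op, &Bop, 3, 6);

    geigs.init();
    geigs.compute();
    if (geigs.info() == Spectra::SUCCESSFUL)
        std::cout << "Generalized eigenvalues:\n" << geigs.eigenvalues() << std::endl;
    return 0;
}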
@ -0,0 +1,102 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
|
||||
#define DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/LU>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the complex shift-solve operation on a general real matrix \f$A\f$,
|
||||
/// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued
|
||||
/// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the
|
||||
/// GenEigsComplexShiftSolver eigen solver.
|
||||
///
|
||||
template <typename Scalar>
|
||||
class DenseGenComplexShiftSolve
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
typedef std::complex<Scalar> Complex;
|
||||
typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
|
||||
typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;
|
||||
|
||||
typedef Eigen::PartialPivLU<ComplexMatrix> ComplexSolver;
|
||||
|
||||
ConstGenericMatrix m_mat;
|
||||
const Index m_n;
|
||||
ComplexSolver m_solver;
|
||||
ComplexVector m_x_cache;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** matrix object, whose type can be
|
||||
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
|
||||
/// `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
DenseGenComplexShiftSolve(ConstGenericMatrix& mat) :
|
||||
m_mat(mat), m_n(mat.rows())
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square");
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Set the complex shift \f$\sigma\f$.
|
||||
///
|
||||
/// \param sigmar Real part of \f$\sigma\f$.
|
||||
/// \param sigmai Imaginary part of \f$\sigma\f$.
|
||||
///
|
||||
void set_shift(Scalar sigmar, Scalar sigmai)
|
||||
{
|
||||
m_solver.compute(m_mat.template cast<Complex>() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n));
|
||||
m_x_cache.resize(m_n);
|
||||
m_x_cache.setZero();
|
||||
}
|
||||
|
||||
///
|
||||
/// Perform the complex shift-solve operation
|
||||
/// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = Re( inv(A - sigma * I) * x_in )
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out)
|
||||
{
|
||||
m_x_cache.real() = MapConstVec(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_solver.solve(m_x_cache).real();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
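// Illustration (not part of the library): DenseGenComplexShiftSolve is the
// operation expected by GenEigsComplexShiftSolver, which targets the eigenvalues
// of a general real matrix closest to a complex shift sigma. Include paths, the
// constructor signature shown here, and the data are assumptions.
#include <Eigen/Core>
#include <iostream>
#include <Spectra/GenEigsComplexShiftSolver.h>
#include <Spectra/MatOp/DenseGenComplexShiftSolve.h>

int main()
{
    const int n = 100;
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);

    Spectra::DenseGenComplexShiftSolve<double> op(A);

    // 3 eigenvalues closest to sigma = 1 + 0.5i, Krylov dimension 6
    Spectra::GenEigsComplexShiftSolver<double, Spectra::LARGEST_MAGN,
                                       Spectra::DenseGenComplexShiftSolve<double> >
        eigs(&op, 3, 6, 1.0, 0.5);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;  // complex-valued vector
    return 0;
}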
@ -0,0 +1,80 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef DENSE_GEN_MAT_PROD_H
|
||||
#define DENSE_GEN_MAT_PROD_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \defgroup MatOp Matrix Operations
|
||||
///
|
||||
/// Define matrix operations on existing matrix objects
|
||||
///
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the matrix-vector multiplication operation on a
|
||||
/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
|
||||
/// \f$x\f$. It is mainly used in the GenEigsSolver and
|
||||
/// SymEigsSolver eigen solvers.
|
||||
///
|
||||
template <typename Scalar>
|
||||
class DenseGenMatProd
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
ConstGenericMatrix m_mat;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** matrix object, whose type can be
|
||||
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
|
||||
/// `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
DenseGenMatProd(ConstGenericMatrix& mat) :
|
||||
m_mat(mat)
|
||||
{}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_mat.rows(); }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_mat.cols(); }
|
||||
|
||||
///
|
||||
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = A * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_mat.cols());
|
||||
MapVec y(y_out, m_mat.rows());
|
||||
y.noalias() = m_mat * x;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // DENSE_GEN_MAT_PROD_H
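// Illustration (not part of the library): the canonical use of DenseGenMatProd
// with GenEigsSolver. Include paths follow the upstream Spectra layout and the
// matrix is made up; both are assumptions.
#include <Eigen/Core>
#include <iostream>
#include <Spectra/GenEigsSolver.h>
#include <Spectra/MatOp/DenseGenMatProd.h>

int main()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);

    Spectra::DenseGenMatProd<double> op(A);

    // 3 eigenvalues of largest magnitude, Krylov dimension 6
    Spectra::GenEigsSolver<double, Spectra::LARGEST_MAGN,
                           Spectra::DenseGenMatProd<double> > eigs(&op, 3, 6);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
    {
        Eigen::VectorXcd evalues = eigs.eigenvalues();  // eigenvalues may be complex
        std::cout << evalues << std::endl;
    }
    return 0;
}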
@ -0,0 +1,88 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef DENSE_GEN_REAL_SHIFT_SOLVE_H
|
||||
#define DENSE_GEN_REAL_SHIFT_SOLVE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/LU>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the shift-solve operation on a general real matrix \f$A\f$,
|
||||
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
|
||||
/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver.
|
||||
///
|
||||
template <typename Scalar>
|
||||
class DenseGenRealShiftSolve
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
ConstGenericMatrix m_mat;
|
||||
const Index m_n;
|
||||
Eigen::PartialPivLU<Matrix> m_solver;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** matrix object, whose type can be
|
||||
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
|
||||
/// `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
DenseGenRealShiftSolve(ConstGenericMatrix& mat) :
|
||||
m_mat(mat), m_n(mat.rows())
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square");
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Set the real shift \f$\sigma\f$.
|
||||
///
|
||||
void set_shift(Scalar sigma)
|
||||
{
|
||||
m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n));
|
||||
}
|
||||
|
||||
///
|
||||
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(A - sigma * I) * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_solver.solve(x);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // DENSE_GEN_REAL_SHIFT_SOLVE_H
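// Illustration (not part of the library): DenseGenRealShiftSolve feeds the
// shift-and-invert solver GenEigsRealShiftSolver, which targets eigenvalues
// closest to a real shift sigma. Include paths and data are assumptions.
#include <Eigen/Core>
#include <iostream>
#include <Spectra/GenEigsRealShiftSolver.h>
#include <Spectra/MatOp/DenseGenRealShiftSolve.h>

int main()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100);

    Spectra::DenseGenRealShiftSolve<double> op(A);

    // 3 eigenvalues closest to sigma = 2.0 (largest magnitude in the inverted spectrum)
    Spectra::GenEigsRealShiftSolver<double, Spectra::LARGEST_MAGN,
                                    Spectra::DenseGenRealShiftSolve<double> > eigs(&op, 3, 6, 2.0);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;
    return 0;
}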
@ -0,0 +1,73 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef DENSE_SYM_MAT_PROD_H
|
||||
#define DENSE_SYM_MAT_PROD_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the matrix-vector multiplication operation on a
|
||||
/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
|
||||
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
|
||||
///
|
||||
template <typename Scalar, int Uplo = Eigen::Lower>
|
||||
class DenseSymMatProd
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
ConstGenericMatrix m_mat;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** matrix object, whose type can be
|
||||
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
|
||||
/// `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
DenseSymMatProd(ConstGenericMatrix& mat) :
|
||||
m_mat(mat)
|
||||
{}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_mat.rows(); }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_mat.cols(); }
|
||||
|
||||
///
|
||||
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = A * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_mat.cols());
|
||||
MapVec y(y_out, m_mat.rows());
|
||||
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // DENSE_SYM_MAT_PROD_H
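// Illustration (not part of the library): the canonical use of DenseSymMatProd
// with SymEigsSolver, essentially the introductory Spectra example. Include
// paths follow the upstream layout; the matrix is made up.
#include <Eigen/Core>
#include <iostream>
#include <Spectra/SymEigsSolver.h>
#include <Spectra/MatOp/DenseSymMatProd.h>

int main()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
    Eigen::MatrixXd M = A + A.transpose();  // symmetrize

    Spectra::DenseSymMatProd<double> op(M);

    // 3 largest (algebraic) eigenvalues, Krylov dimension 6
    Spectra::SymEigsSolver<double, Spectra::LARGEST_ALGE,
                           Spectra::DenseSymMatProd<double> > eigs(&op, 3, 6);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << "Eigenvalues:\n" << eigs.eigenvalues() << std::endl;
    return 0;
}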
@ -0,0 +1,92 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef DENSE_SYM_SHIFT_SOLVE_H
|
||||
#define DENSE_SYM_SHIFT_SOLVE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <stdexcept>
|
||||
|
||||
#include "../LinAlg/BKLDLT.h"
|
||||
#include "../Util/CompInfo.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$,
|
||||
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
|
||||
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
|
||||
///
|
||||
template <typename Scalar, int Uplo = Eigen::Lower>
|
||||
class DenseSymShiftSolve
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef const Eigen::Ref<const Matrix> ConstGenericMatrix;
|
||||
|
||||
ConstGenericMatrix m_mat;
|
||||
const int m_n;
|
||||
BKLDLT<Scalar> m_solver;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** matrix object, whose type can be
|
||||
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
|
||||
/// `Eigen::MatrixXf`), or its mapped version
|
||||
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
|
||||
///
|
||||
DenseSymShiftSolve(ConstGenericMatrix& mat) :
|
||||
m_mat(mat), m_n(mat.rows())
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("DenseSymShiftSolve: matrix must be square");
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Set the real shift \f$\sigma\f$.
|
||||
///
|
||||
void set_shift(Scalar sigma)
|
||||
{
|
||||
m_solver.compute(m_mat, Uplo, sigma);
|
||||
if (m_solver.info() != SUCCESSFUL)
|
||||
throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift");
|
||||
}
|
||||
|
||||
///
|
||||
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(A - sigma * I) * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_solver.solve(x);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // DENSE_SYM_SHIFT_SOLVE_H
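// Illustration (not part of the library): DenseSymShiftSolve paired with
// SymEigsShiftSolver to compute the eigenvalues of a symmetric matrix closest
// to a real shift sigma. Include paths and data are assumptions.
#include <Eigen/Core>
#include <iostream>
#include <Spectra/SymEigsShiftSolver.h>
#include <Spectra/MatOp/DenseSymShiftSolve.h>

int main()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100);
    Eigen::MatrixXd M = A + A.transpose();

    Spectra::DenseSymShiftSolve<double> op(M);

    // 3 eigenvalues closest to sigma = 0, i.e. the smallest-magnitude ones
    Spectra::SymEigsShiftSolver<double, Spectra::LARGEST_MAGN,
                                Spectra::DenseSymShiftSolve<double> > eigs(&op, 3, 6, 0.0);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;  // reported in the original (un-shifted) spectrum
    return 0;
}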
@ -0,0 +1,109 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SPARSE_CHOLESKY_H
|
||||
#define SPARSE_CHOLESKY_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/SparseCore>
|
||||
#include <Eigen/SparseCholesky>
|
||||
#include <stdexcept>
|
||||
#include "../Util/CompInfo.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the operations related to Cholesky decomposition on a
|
||||
/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
|
||||
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
|
||||
/// in the Cholesky decomposition mode.
|
||||
///
|
||||
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
|
||||
class SparseCholesky
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
|
||||
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
|
||||
|
||||
const Index m_n;
|
||||
Eigen::SimplicialLLT<SparseMatrix, Uplo> m_decomp;
|
||||
int m_info; // status of the decomposition
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** sparse matrix object, whose type can be
|
||||
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
|
||||
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
|
||||
///
|
||||
SparseCholesky(ConstGenericSparseMatrix& mat) :
|
||||
m_n(mat.rows())
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("SparseCholesky: matrix must be square");
|
||||
|
||||
m_decomp.compute(mat);
|
||||
m_info = (m_decomp.info() == Eigen::Success) ?
|
||||
SUCCESSFUL :
|
||||
NUMERICAL_ISSUE;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Returns the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Returns the status of the computation.
|
||||
/// The full list of enumeration values can be found in \ref Enumerations.
|
||||
///
|
||||
int info() const { return m_info; }
|
||||
|
||||
///
|
||||
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(L) * x_in
|
||||
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_decomp.permutationP() * x;
|
||||
m_decomp.matrixL().solveInPlace(y);
|
||||
}
|
||||
|
||||
///
|
||||
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(L') * x_in
|
||||
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_decomp.matrixU().solve(x);
|
||||
y = m_decomp.permutationPinv() * y;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SPARSE_CHOLESKY_H
@ -0,0 +1,74 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SPARSE_GEN_MAT_PROD_H
|
||||
#define SPARSE_GEN_MAT_PROD_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/SparseCore>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the matrix-vector multiplication operation on a
|
||||
/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
|
||||
/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver
|
||||
/// eigen solvers.
|
||||
///
|
||||
template <typename Scalar, int Flags = 0, typename StorageIndex = int>
|
||||
class SparseGenMatProd
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
|
||||
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
|
||||
|
||||
ConstGenericSparseMatrix m_mat;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** sparse matrix object, whose type can be
|
||||
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
|
||||
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
|
||||
///
|
||||
SparseGenMatProd(ConstGenericSparseMatrix& mat) :
|
||||
m_mat(mat)
|
||||
{}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_mat.rows(); }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_mat.cols(); }
|
||||
|
||||
///
|
||||
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = A * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_mat.cols());
|
||||
MapVec y(y_out, m_mat.rows());
|
||||
y.noalias() = m_mat * x;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SPARSE_GEN_MAT_PROD_H
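// Illustration (not part of the library): SparseGenMatProd used with
// GenEigsSolver on an Eigen::SparseMatrix. Include paths and the toy matrix
// are assumptions.
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <iostream>
#include <Spectra/GenEigsSolver.h>
#include <Spectra/MatOp/SparseGenMatProd.h>

int main()
{
    const int n = 100;
    // Simple sparse test matrix: tridiagonal with a small asymmetry
    Eigen::SparseMatrix<double> M(n, n);
    for (int i = 0; i < n; i++)
    {
        M.insert(i, i) = 2.0;
        if (i > 0)     M.insert(i, i - 1) = -1.0;
        if (i < n - 1) M.insert(i, i + 1) = -1.1;
    }
    M.makeCompressed();

    Spectra::SparseGenMatProd<double> op(M);
    Spectra::GenEigsSolver<double, Spectra::LARGEST_MAGN,
                           Spectra::SparseGenMatProd<double> > eigs(&op, 3, 6);

    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues() << std::endl;
    return 0;
}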
@ -0,0 +1,93 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SPARSE_GEN_REAL_SHIFT_SOLVE_H
|
||||
#define SPARSE_GEN_REAL_SHIFT_SOLVE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/SparseCore>
|
||||
#include <Eigen/SparseLU>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the shift-solve operation on a sparse real matrix \f$A\f$,
|
||||
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
|
||||
/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver.
|
||||
///
|
||||
template <typename Scalar, int Flags = 0, typename StorageIndex = int>
|
||||
class SparseGenRealShiftSolve
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
|
||||
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
|
||||
|
||||
ConstGenericSparseMatrix m_mat;
|
||||
const int m_n;
|
||||
Eigen::SparseLU<SparseMatrix> m_solver;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** sparse matrix object, whose type can be
|
||||
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
|
||||
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
|
||||
///
|
||||
SparseGenRealShiftSolve(ConstGenericSparseMatrix& mat) :
|
||||
m_mat(mat), m_n(mat.rows())
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square");
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Set the real shift \f$\sigma\f$.
|
||||
///
|
||||
void set_shift(Scalar sigma)
|
||||
{
|
||||
SparseMatrix I(m_n, m_n);
|
||||
I.setIdentity();
|
||||
|
||||
m_solver.compute(m_mat - sigma * I);
|
||||
if (m_solver.info() != Eigen::Success)
|
||||
throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift");
|
||||
}
|
||||
|
||||
///
|
||||
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(A - sigma * I) * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_solver.solve(x);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SPARSE_GEN_REAL_SHIFT_SOLVE_H
@ -0,0 +1,100 @@
// Copyright (C) 2017-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SPARSE_REGULAR_INVERSE_H
|
||||
#define SPARSE_REGULAR_INVERSE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/SparseCore>
|
||||
#include <Eigen/IterativeLinearSolvers>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines matrix operations required by the generalized eigen solver
|
||||
/// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$,
|
||||
/// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation
|
||||
/// solving operation \f$y=B^{-1}x\f$.
|
||||
///
|
||||
/// This class is intended to be used with the SymGEigsSolver generalized eigen solver
|
||||
/// in the regular inverse mode.
|
||||
///
|
||||
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
|
||||
class SparseRegularInverse
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
|
||||
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
|
||||
|
||||
ConstGenericSparseMatrix m_mat;
|
||||
const int m_n;
|
||||
Eigen::ConjugateGradient<SparseMatrix> m_cg;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** sparse matrix object, whose type can be
|
||||
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
|
||||
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
|
||||
///
|
||||
SparseRegularInverse(ConstGenericSparseMatrix& mat) :
|
||||
m_mat(mat), m_n(mat.rows())
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("SparseRegularInverse: matrix must be square");
|
||||
|
||||
m_cg.compute(mat);
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Perform the solving operation \f$y=B^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(B) * x_in
|
||||
void solve(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_cg.solve(x);
|
||||
}
|
||||
|
||||
///
|
||||
/// Perform the matrix-vector multiplication operation \f$y=Bx\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = B * x_in
|
||||
void mat_prod(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SPARSE_REGULAR_INVERSE_H
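// Illustration (not part of the library): the regular inverse mode of
// SymGEigsSolver, pairing SparseSymMatProd (for A) with SparseRegularInverse
// (for a sparse positive definite B). Include paths and matrices are assumptions.
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <iostream>
#include <Spectra/SymGEigsSolver.h>
#include <Spectra/MatOp/SparseSymMatProd.h>
#include <Spectra/MatOp/SparseRegularInverse.h>

int main()
{
    const int n = 100;
    Eigen::SparseMatrix<double> A(n, n), B(n, n);
    for (int i = 0; i < n; i++)
    {
        A.insert(i, i) = 2.0;                      // symmetric tridiagonal A
        if (i > 0)     A.insert(i, i - 1) = -1.0;
        if (i < n - 1) A.insert(i, i + 1) = -1.0;
        B.insert(i, i) = 4.0;                      // simple diagonal, positive definite B
    }
    A.makeCompressed();
    B.makeCompressed();

    Spectra::SparseSymMatProd<double> op(A);
    Spectra::SparseRegularInverse<double> Bop(B);

    Spectra::SymGEigsSolver<double, Spectra::LARGEST_ALGE,
                            Spectra::SparseSymMatProd<double>,
                            Spectra::SparseRegularInverse<double>,
                            Spectra::GEIGS_REGULAR_INVERSE>
        geigs(&op, &Bop, 3, 6);

    geigs.init();
    geigs.compute();
    if (geigs.info() == Spectra::SUCCESSFUL)
        std::cout << geigs.eigenvalues() << std::endl;
    return 0;
}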
@ -0,0 +1,73 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SPARSE_SYM_MAT_PROD_H
|
||||
#define SPARSE_SYM_MAT_PROD_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/SparseCore>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the matrix-vector multiplication operation on a
|
||||
/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
|
||||
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
|
||||
///
|
||||
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
|
||||
class SparseSymMatProd
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
|
||||
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
|
||||
|
||||
ConstGenericSparseMatrix m_mat;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** sparse matrix object, whose type can be
|
||||
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
|
||||
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
|
||||
///
|
||||
SparseSymMatProd(ConstGenericSparseMatrix& mat) :
|
||||
m_mat(mat)
|
||||
{}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_mat.rows(); }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_mat.cols(); }
|
||||
|
||||
///
|
||||
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = A * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_mat.cols());
|
||||
MapVec y(y_out, m_mat.rows());
|
||||
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SPARSE_SYM_MAT_PROD_H
@ -0,0 +1,95 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SPARSE_SYM_SHIFT_SOLVE_H
|
||||
#define SPARSE_SYM_SHIFT_SOLVE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/SparseCore>
|
||||
#include <Eigen/SparseLU>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup MatOp
|
||||
///
|
||||
/// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$,
|
||||
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
|
||||
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
|
||||
///
|
||||
template <typename Scalar, int Uplo = Eigen::Lower, int Flags = 0, typename StorageIndex = int>
|
||||
class SparseSymShiftSolve
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::SparseMatrix<Scalar, Flags, StorageIndex> SparseMatrix;
|
||||
typedef const Eigen::Ref<const SparseMatrix> ConstGenericSparseMatrix;
|
||||
|
||||
ConstGenericSparseMatrix m_mat;
|
||||
const int m_n;
|
||||
Eigen::SparseLU<SparseMatrix> m_solver;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param mat An **Eigen** sparse matrix object, whose type can be
|
||||
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
|
||||
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
|
||||
///
|
||||
SparseSymShiftSolve(ConstGenericSparseMatrix& mat) :
|
||||
m_mat(mat), m_n(mat.rows())
|
||||
{
|
||||
if (mat.rows() != mat.cols())
|
||||
throw std::invalid_argument("SparseSymShiftSolve: matrix must be square");
|
||||
}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_n; }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_n; }
|
||||
|
||||
///
|
||||
/// Set the real shift \f$\sigma\f$.
|
||||
///
|
||||
void set_shift(Scalar sigma)
|
||||
{
|
||||
SparseMatrix mat = m_mat.template selfadjointView<Uplo>();
|
||||
SparseMatrix identity(m_n, m_n);
|
||||
identity.setIdentity();
|
||||
mat = mat - sigma * identity;
|
||||
m_solver.isSymmetric(true);
|
||||
m_solver.compute(mat);
|
||||
if (m_solver.info() != Eigen::Success)
|
||||
throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift");
|
||||
}
|
||||
|
||||
///
|
||||
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(A - sigma * I) * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out) const
|
||||
{
|
||||
MapConstVec x(x_in, m_n);
|
||||
MapVec y(y_out, m_n);
|
||||
y.noalias() = m_solver.solve(x);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SPARSE_SYM_SHIFT_SOLVE_H
@ -0,0 +1,150 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef ARNOLDI_OP_H
|
||||
#define ARNOLDI_OP_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <cmath> // std::sqrt
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup Internals
|
||||
/// @{
|
||||
///
|
||||
|
||||
///
|
||||
/// \defgroup Operators Operators
|
||||
///
|
||||
/// Different types of operators.
|
||||
///
|
||||
|
||||
///
|
||||
/// \ingroup Operators
|
||||
///
|
||||
/// Operators used in the Arnoldi factorization.
|
||||
///
|
||||
template <typename Scalar, typename OpType, typename BOpType>
|
||||
class ArnoldiOp
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
OpType& m_op;
|
||||
BOpType& m_Bop;
|
||||
Vector m_cache;
|
||||
|
||||
public:
|
||||
ArnoldiOp(OpType* op, BOpType* Bop) :
|
||||
m_op(*op), m_Bop(*Bop), m_cache(op->rows())
|
||||
{}
|
||||
|
||||
inline Index rows() const { return m_op.rows(); }
|
||||
|
||||
// In the generalized eigenvalue problem Ax = lambda*Bx, define the inner product to be <x, y> = x'By.
// For regular eigenvalue problems, it is the usual inner product <x, y> = x'y.
|
||||
|
||||
// Compute <x, y> = x'By
|
||||
// x and y are two vectors
|
||||
template <typename Arg1, typename Arg2>
|
||||
Scalar inner_product(const Arg1& x, const Arg2& y)
|
||||
{
|
||||
m_Bop.mat_prod(y.data(), m_cache.data());
|
||||
return x.dot(m_cache);
|
||||
}
|
||||
|
||||
// Compute res = <X, y> = X'By
|
||||
// X is a matrix, y is a vector, res is a vector
|
||||
template <typename Arg1, typename Arg2>
|
||||
void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res)
|
||||
{
|
||||
m_Bop.mat_prod(y.data(), m_cache.data());
|
||||
res.noalias() = x.transpose() * m_cache;
|
||||
}
|
||||
|
||||
// B-norm of a vector, ||x||_B = sqrt(x'Bx)
|
||||
template <typename Arg>
|
||||
Scalar norm(const Arg& x)
|
||||
{
|
||||
using std::sqrt;
|
||||
return sqrt(inner_product<Arg, Arg>(x, x));
|
||||
}
|
||||
|
||||
// The "A" operator to generate the Krylov subspace
|
||||
inline void perform_op(const Scalar* x_in, Scalar* y_out)
|
||||
{
|
||||
m_op.perform_op(x_in, y_out);
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// \ingroup Operators
|
||||
///
|
||||
/// Placeholder for the B-operator when \f$B = I\f$.
|
||||
///
|
||||
class IdentityBOp
|
||||
{};
|
||||
|
||||
///
|
||||
/// \ingroup Operators
|
||||
///
|
||||
/// Partial specialization for the case \f$B = I\f$.
|
||||
///
|
||||
template <typename Scalar, typename OpType>
|
||||
class ArnoldiOp<Scalar, OpType, IdentityBOp>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
OpType& m_op;
|
||||
|
||||
public:
|
||||
ArnoldiOp<Scalar, OpType, IdentityBOp>(OpType* op, IdentityBOp* /*Bop*/) :
|
||||
m_op(*op)
|
||||
{}
|
||||
|
||||
inline Index rows() const { return m_op.rows(); }
|
||||
|
||||
// Compute <x, y> = x'y
|
||||
// x and y are two vectors
|
||||
template <typename Arg1, typename Arg2>
|
||||
Scalar inner_product(const Arg1& x, const Arg2& y) const
|
||||
{
|
||||
return x.dot(y);
|
||||
}
|
||||
|
||||
// Compute res = <X, y> = X'y
|
||||
// X is a matrix, y is a vector, res is a vector
|
||||
template <typename Arg1, typename Arg2>
|
||||
void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res) const
|
||||
{
|
||||
res.noalias() = x.transpose() * y;
|
||||
}
|
||||
|
||||
// B-norm of a vector. For regular eigenvalue problems it is simply the L2 norm
|
||||
template <typename Arg>
|
||||
Scalar norm(const Arg& x)
|
||||
{
|
||||
return x.norm();
|
||||
}
|
||||
|
||||
// The "A" operator to generate the Krylov subspace
|
||||
inline void perform_op(const Scalar* x_in, Scalar* y_out)
|
||||
{
|
||||
m_op.perform_op(x_in, y_out);
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// @}
|
||||
///
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // ARNOLDI_OP_H
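// Illustration (not part of the library): the B-inner product and B-norm that
// ArnoldiOp implements for the generalized problem A x = lambda B x, written
// out with plain Eigen on made-up data.
#include <Eigen/Core>
#include <cmath>
#include <iostream>

int main()
{
    const int n = 5;
    Eigen::MatrixXd C = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd B = C * C.transpose() + double(n) * Eigen::MatrixXd::Identity(n, n);  // positive definite
    Eigen::VectorXd x = Eigen::VectorXd::Random(n);
    Eigen::VectorXd y = Eigen::VectorXd::Random(n);

    const double xy_B     = x.dot(B * y);             // <x, y>_B = x' B y  (inner_product)
    const double x_norm_B = std::sqrt(x.dot(B * x));  // ||x||_B = sqrt(x' B x)  (norm)
    std::cout << xy_B << " " << x_norm_B << std::endl;
    return 0;
}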
@ -0,0 +1,75 @@
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SYM_GEIGS_CHOLESKY_OP_H
|
||||
#define SYM_GEIGS_CHOLESKY_OP_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include "../DenseSymMatProd.h"
|
||||
#include "../DenseCholesky.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup Operators
|
||||
///
|
||||
/// This class defines the matrix operation for generalized eigen solver in the
|
||||
/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any
|
||||
/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$.
|
||||
/// This class is intended for internal use.
|
||||
///
|
||||
template <typename Scalar = double,
|
||||
typename OpType = DenseSymMatProd<double>,
|
||||
typename BOpType = DenseCholesky<double> >
|
||||
class SymGEigsCholeskyOp
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
OpType& m_op;
|
||||
BOpType& m_Bop;
|
||||
Vector m_cache; // temporary working space
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param op Pointer to the \f$A\f$ matrix operation object.
|
||||
/// \param Bop Pointer to the \f$B\f$ matrix operation object.
|
||||
///
|
||||
SymGEigsCholeskyOp(OpType& op, BOpType& Bop) :
|
||||
m_op(op), m_Bop(Bop), m_cache(op.rows())
|
||||
{}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_Bop.rows(); }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_Bop.rows(); }
|
||||
|
||||
///
|
||||
/// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(L) * A * inv(L') * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out)
|
||||
{
|
||||
m_Bop.upper_triangular_solve(x_in, y_out);
|
||||
m_op.perform_op(y_out, m_cache.data());
|
||||
m_Bop.lower_triangular_solve(m_cache.data(), y_out);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SYM_GEIGS_CHOLESKY_OP_H
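// Note (not part of the library): why the Cholesky mode works. Writing B = LL'
// and substituting x = (L')^{-1} u turns the pencil problem
//     A x = lambda B x
// into the standard symmetric problem
//     L^{-1} A (L')^{-1} u = lambda u,
// which is exactly the operator that SymGEigsCholeskyOp::perform_op applies above;
// the eigenvalues are unchanged and the eigenvectors are recovered as
// x = (L')^{-1} u by the wrapping generalized solver.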
@ -0,0 +1,72 @@
// Copyright (C) 2017-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SYM_GEIGS_REG_INV_OP_H
|
||||
#define SYM_GEIGS_REG_INV_OP_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include "../SparseSymMatProd.h"
|
||||
#include "../SparseRegularInverse.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup Operators
|
||||
///
|
||||
/// This class defines the matrix operation for generalized eigen solver in the
|
||||
/// regular inverse mode. This class is intended for internal use.
|
||||
///
|
||||
template <typename Scalar = double,
|
||||
typename OpType = SparseSymMatProd<double>,
|
||||
typename BOpType = SparseRegularInverse<double> >
|
||||
class SymGEigsRegInvOp
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
OpType& m_op;
|
||||
BOpType& m_Bop;
|
||||
Vector m_cache; // temporary working space
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create the matrix operation object.
|
||||
///
|
||||
/// \param op Pointer to the \f$A\f$ matrix operation object.
|
||||
/// \param Bop Pointer to the \f$B\f$ matrix operation object.
|
||||
///
|
||||
SymGEigsRegInvOp(OpType& op, BOpType& Bop) :
|
||||
m_op(op), m_Bop(Bop), m_cache(op.rows())
|
||||
{}
|
||||
|
||||
///
|
||||
/// Return the number of rows of the underlying matrix.
|
||||
///
|
||||
Index rows() const { return m_Bop.rows(); }
|
||||
///
|
||||
/// Return the number of columns of the underlying matrix.
|
||||
///
|
||||
Index cols() const { return m_Bop.rows(); }
|
||||
|
||||
///
|
||||
/// Perform the matrix operation \f$y=B^{-1}Ax\f$.
|
||||
///
|
||||
/// \param x_in Pointer to the \f$x\f$ vector.
|
||||
/// \param y_out Pointer to the \f$y\f$ vector.
|
||||
///
|
||||
// y_out = inv(B) * A * x_in
|
||||
void perform_op(const Scalar* x_in, Scalar* y_out)
|
||||
{
|
||||
m_op.perform_op(x_in, m_cache.data());
|
||||
m_Bop.solve(m_cache.data(), y_out);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SYM_GEIGS_REG_INV_OP_H
@ -0,0 +1,448 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SYM_EIGS_BASE_H
|
||||
#define SYM_EIGS_BASE_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <vector> // std::vector
|
||||
#include <cmath> // std::abs, std::pow, std::sqrt
|
||||
#include <algorithm> // std::min, std::copy
|
||||
#include <stdexcept> // std::invalid_argument
|
||||
|
||||
#include "Util/TypeTraits.h"
|
||||
#include "Util/SelectionRule.h"
|
||||
#include "Util/CompInfo.h"
|
||||
#include "Util/SimpleRandom.h"
|
||||
#include "MatOp/internal/ArnoldiOp.h"
|
||||
#include "LinAlg/UpperHessenbergQR.h"
|
||||
#include "LinAlg/TridiagEigen.h"
|
||||
#include "LinAlg/Lanczos.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \defgroup EigenSolver Eigen Solvers
|
||||
///
|
||||
/// Eigen solvers for different types of problems.
|
||||
///
|
||||
|
||||
///
|
||||
/// \ingroup EigenSolver
|
||||
///
|
||||
/// This is the base class for symmetric eigen solvers, mainly for internal use.
|
||||
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
|
||||
/// such as SymEigsSolver and SymEigsShiftSolver.
|
||||
///
|
||||
template <typename Scalar,
|
||||
int SelectionRule,
|
||||
typename OpType,
|
||||
typename BOpType>
|
||||
class SymEigsBase
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
|
||||
typedef Eigen::Array<bool, Eigen::Dynamic, 1> BoolArray;
|
||||
typedef Eigen::Map<Matrix> MapMat;
|
||||
typedef Eigen::Map<Vector> MapVec;
|
||||
typedef Eigen::Map<const Vector> MapConstVec;
|
||||
|
||||
typedef ArnoldiOp<Scalar, OpType, BOpType> ArnoldiOpType;
|
||||
typedef Lanczos<Scalar, ArnoldiOpType> LanczosFac;
|
||||
|
||||
protected:
|
||||
// clang-format off
|
||||
OpType* m_op; // object to conduct matrix operation,
|
||||
// e.g. matrix-vector product
|
||||
const Index m_n; // dimension of matrix A
|
||||
const Index m_nev; // number of eigenvalues requested
|
||||
const Index m_ncv; // dimension of Krylov subspace in the Lanczos method
|
||||
Index m_nmatop; // number of matrix operations called
|
||||
Index m_niter; // number of restarting iterations
|
||||
|
||||
LanczosFac m_fac; // Lanczos factorization
|
||||
Vector m_ritz_val; // Ritz values
|
||||
|
||||
private:
|
||||
Matrix m_ritz_vec; // Ritz vectors
|
||||
Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates
|
||||
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
|
||||
int m_info; // status of the computation
|
||||
|
||||
const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow
|
||||
// ~= 1e-307 for the "double" type
|
||||
const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type
|
||||
const Scalar m_eps23; // m_eps^(2/3), used to test the convergence
|
||||
// clang-format on
|
||||
|
||||
// Implicitly restarted Lanczos factorization
|
||||
void restart(Index k)
|
||||
{
|
||||
if (k >= m_ncv)
|
||||
return;
|
||||
|
||||
TridiagQR<Scalar> decomp(m_ncv);
|
||||
Matrix Q = Matrix::Identity(m_ncv, m_ncv);
|
||||
|
||||
for (Index i = k; i < m_ncv; i++)
|
||||
{
|
||||
// QR decomposition of H-mu*I, mu is the shift
|
||||
decomp.compute(m_fac.matrix_H(), m_ritz_val[i]);
|
||||
|
||||
// Q -> Q * Qi
|
||||
decomp.apply_YQ(Q);
|
||||
// H -> Q'HQ
|
||||
// Since QR = H - mu * I, we have H = QR + mu * I
|
||||
// and therefore Q'HQ = RQ + mu * I
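// (Indeed, using the orthogonality Q'Q = I:
//  Q'HQ = Q'(QR + mu * I)Q = (Q'Q)RQ + mu * Q'Q = RQ + mu * I)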
|
||||
m_fac.compress_H(decomp);
|
||||
}
|
||||
|
||||
m_fac.compress_V(Q);
|
||||
m_fac.factorize_from(k, m_ncv, m_nmatop);
|
||||
|
||||
retrieve_ritzpair();
|
||||
}
|
||||
|
||||
// Calculates the number of converged Ritz values
|
||||
Index num_converged(Scalar tol)
|
||||
{
|
||||
// thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value
|
||||
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23);
|
||||
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
|
||||
// Converged "wanted" Ritz values
|
||||
m_ritz_conv = (resid < thresh);
|
||||
|
||||
return m_ritz_conv.cast<Index>().sum();
|
||||
}
|
||||
|
||||
// Returns the adjusted nev for restarting
|
||||
Index nev_adjusted(Index nconv)
|
||||
{
|
||||
using std::abs;
|
||||
|
||||
Index nev_new = m_nev;
|
||||
for (Index i = m_nev; i < m_ncv; i++)
|
||||
if (abs(m_ritz_est[i]) < m_near_0)
|
||||
nev_new++;
|
||||
|
||||
// Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK
|
||||
nev_new += std::min(nconv, (m_ncv - nev_new) / 2);
|
||||
if (nev_new == 1 && m_ncv >= 6)
|
||||
nev_new = m_ncv / 2;
|
||||
else if (nev_new == 1 && m_ncv > 2)
|
||||
nev_new = 2;
|
||||
|
||||
if (nev_new > m_ncv - 1)
|
||||
nev_new = m_ncv - 1;
|
||||
|
||||
return nev_new;
|
||||
}
|
||||
|
||||
// Retrieves and sorts Ritz values and Ritz vectors
|
||||
void retrieve_ritzpair()
|
||||
{
|
||||
TridiagEigen<Scalar> decomp(m_fac.matrix_H());
|
||||
const Vector& evals = decomp.eigenvalues();
|
||||
const Matrix& evecs = decomp.eigenvectors();
|
||||
|
||||
SortEigenvalue<Scalar, SelectionRule> sorting(evals.data(), evals.size());
|
||||
std::vector<int> ind = sorting.index();
|
||||
|
||||
// For BOTH_ENDS, the eigenvalues are sorted according
|
||||
// to the LARGEST_ALGE rule, so we need to move those smallest
|
||||
// values to the left
|
||||
// The order would be
|
||||
// Largest => Smallest => 2nd largest => 2nd smallest => ...
|
||||
// We keep this order since the first k values will always be
|
||||
// the wanted collection, no matter whether k is the adjusted nev (used in restart())
|
||||
// or nev itself (used in sort_ritzpair())
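// For example, with m_ncv = 5 and sorted values v1 >= v2 >= ... >= v5,
// the reordered sequence is v1, v5, v2, v4, v3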
|
||||
if (SelectionRule == BOTH_ENDS)
|
||||
{
|
||||
std::vector<int> ind_copy(ind);
|
||||
for (Index i = 0; i < m_ncv; i++)
|
||||
{
|
||||
// If i is even, pick values from the left (large values)
|
||||
// If i is odd, pick values from the right (small values)
|
||||
if (i % 2 == 0)
|
||||
ind[i] = ind_copy[i / 2];
|
||||
else
|
||||
ind[i] = ind_copy[m_ncv - 1 - i / 2];
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
|
||||
for (Index i = 0; i < m_ncv; i++)
|
||||
{
|
||||
m_ritz_val[i] = evals[ind[i]];
|
||||
m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
|
||||
}
|
||||
for (Index i = 0; i < m_nev; i++)
|
||||
{
|
||||
m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
// Sorts the first nev Ritz pairs in the specified order
|
||||
// This is used to return the final results
|
||||
virtual void sort_ritzpair(int sort_rule)
|
||||
{
|
||||
// First make sure that we have a valid index vector
|
||||
SortEigenvalue<Scalar, LARGEST_ALGE> sorting(m_ritz_val.data(), m_nev);
|
||||
std::vector<int> ind = sorting.index();
|
||||
|
||||
switch (sort_rule)
|
||||
{
|
||||
case LARGEST_ALGE:
|
||||
break;
|
||||
case LARGEST_MAGN:
|
||||
{
|
||||
SortEigenvalue<Scalar, LARGEST_MAGN> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
case SMALLEST_ALGE:
|
||||
{
|
||||
SortEigenvalue<Scalar, SMALLEST_ALGE> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
case SMALLEST_MAGN:
|
||||
{
|
||||
SortEigenvalue<Scalar, SMALLEST_MAGN> sorting(m_ritz_val.data(), m_nev);
|
||||
ind = sorting.index();
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw std::invalid_argument("unsupported sorting rule");
|
||||
}
|
||||
|
||||
Vector new_ritz_val(m_ncv);
|
||||
Matrix new_ritz_vec(m_ncv, m_nev);
|
||||
BoolArray new_ritz_conv(m_nev);
|
||||
|
||||
for (Index i = 0; i < m_nev; i++)
|
||||
{
|
||||
new_ritz_val[i] = m_ritz_val[ind[i]];
|
||||
new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
|
||||
new_ritz_conv[i] = m_ritz_conv[ind[i]];
|
||||
}
|
||||
|
||||
m_ritz_val.swap(new_ritz_val);
|
||||
m_ritz_vec.swap(new_ritz_vec);
|
||||
m_ritz_conv.swap(new_ritz_conv);
|
||||
}
|
||||
|
||||
public:
|
||||
/// \cond
|
||||
|
||||
SymEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) :
|
||||
m_op(op),
|
||||
m_n(m_op->rows()),
|
||||
m_nev(nev),
|
||||
m_ncv(ncv > m_n ? m_n : ncv),
|
||||
m_nmatop(0),
|
||||
m_niter(0),
|
||||
m_fac(ArnoldiOpType(op, Bop), m_ncv),
|
||||
m_info(NOT_COMPUTED),
|
||||
m_near_0(TypeTraits<Scalar>::min() * Scalar(10)),
|
||||
m_eps(Eigen::NumTraits<Scalar>::epsilon()),
|
||||
m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3))
|
||||
{
|
||||
if (nev < 1 || nev > m_n - 1)
|
||||
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
|
||||
|
||||
if (ncv <= nev || ncv > m_n)
|
||||
throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix");
|
||||
}
|
||||
|
||||
///
|
||||
/// Virtual destructor
|
||||
///
|
||||
virtual ~SymEigsBase() {}
|
||||
|
||||
/// \endcond
|
||||
|
||||
///
|
||||
/// Initializes the solver by providing an initial residual vector.
|
||||
///
|
||||
/// \param init_resid Pointer to the initial residual vector.
|
||||
///
|
||||
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
|
||||
/// to find eigenvalues. This function allows the user to provide the initial
|
||||
/// residual vector.
|
||||
///
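///
/// A minimal calling sketch (here `eigs` stands for an already-constructed
/// solver object and `n` for the size of the problem):
///
/// \code{.cpp}
/// // Use a constant vector instead of the default random starting vector
/// Eigen::VectorXd v0 = Eigen::VectorXd::Constant(n, 1.0);
/// eigs.init(v0.data());
/// \endcode
///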
|
||||
void init(const Scalar* init_resid)
|
||||
{
|
||||
// Reset all matrices/vectors to zero
|
||||
m_ritz_val.resize(m_ncv);
|
||||
m_ritz_vec.resize(m_ncv, m_nev);
|
||||
m_ritz_est.resize(m_ncv);
|
||||
m_ritz_conv.resize(m_nev);
|
||||
|
||||
m_ritz_val.setZero();
|
||||
m_ritz_vec.setZero();
|
||||
m_ritz_est.setZero();
|
||||
m_ritz_conv.setZero();
|
||||
|
||||
m_nmatop = 0;
|
||||
m_niter = 0;
|
||||
|
||||
// Initialize the Lanczos factorization
|
||||
MapConstVec v0(init_resid, m_n);
|
||||
m_fac.init(v0, m_nmatop);
|
||||
}
|
||||
|
||||
///
|
||||
/// Initializes the solver by providing a random initial residual vector.
|
||||
///
|
||||
/// This overloaded function generates a random initial residual vector
|
||||
/// (with a fixed random seed) for the algorithm. Elements in the vector
|
||||
/// follow independent Uniform(-0.5, 0.5) distributions.
|
||||
///
|
||||
void init()
|
||||
{
|
||||
SimpleRandom<Scalar> rng(0);
|
||||
Vector init_resid = rng.random_vec(m_n);
|
||||
init(init_resid.data());
|
||||
}
|
||||
|
||||
///
|
||||
/// Conducts the major computation procedure.
|
||||
///
|
||||
/// \param maxit Maximum number of iterations allowed in the algorithm.
|
||||
/// \param tol Precision parameter for the calculated eigenvalues.
|
||||
/// \param sort_rule Rule to sort the eigenvalues and eigenvectors.
|
||||
/// Supported values are
|
||||
/// `Spectra::LARGEST_ALGE`, `Spectra::LARGEST_MAGN`,
|
||||
/// `Spectra::SMALLEST_ALGE` and `Spectra::SMALLEST_MAGN`,
|
||||
/// for example `LARGEST_ALGE` indicates that the largest eigenvalues
|
||||
/// come first. Note that this argument is only used to
|
||||
/// **sort** the final result, and the **selection** rule
|
||||
/// (e.g. selecting the largest or smallest eigenvalues in the
|
||||
/// full spectrum) is specified by the template parameter
|
||||
/// `SelectionRule` of SymEigsSolver.
|
||||
///
|
||||
/// \return Number of converged eigenvalues.
|
||||
///
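/// A minimal calling sketch (here `eigs` stands for an already-constructed
/// and initialized solver object):
///
/// \code{.cpp}
/// // At most 500 restart iterations, tolerance 1e-8, and results sorted
/// // in increasing algebraic order
/// int nconv = eigs.compute(500, 1e-8, SMALLEST_ALGE);
/// \endcode
///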
|
||||
Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_ALGE)
|
||||
{
|
||||
// The m-step Lanczos factorization
|
||||
m_fac.factorize_from(1, m_ncv, m_nmatop);
|
||||
retrieve_ritzpair();
|
||||
// Restarting
|
||||
Index i, nconv = 0, nev_adj;
|
||||
for (i = 0; i < maxit; i++)
|
||||
{
|
||||
nconv = num_converged(tol);
|
||||
if (nconv >= m_nev)
|
||||
break;
|
||||
|
||||
nev_adj = nev_adjusted(nconv);
|
||||
restart(nev_adj);
|
||||
}
|
||||
// Sorting results
|
||||
sort_ritzpair(sort_rule);
|
||||
|
||||
m_niter += i + 1;
|
||||
m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING;
|
||||
|
||||
return std::min(m_nev, nconv);
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the status of the computation.
|
||||
/// The full list of enumeration values can be found in \ref Enumerations.
|
||||
///
|
||||
int info() const { return m_info; }
|
||||
|
||||
///
|
||||
/// Returns the number of iterations used in the computation.
|
||||
///
|
||||
Index num_iterations() const { return m_niter; }
|
||||
|
||||
///
|
||||
/// Returns the number of matrix operations used in the computation.
|
||||
///
|
||||
Index num_operations() const { return m_nmatop; }
|
||||
|
||||
///
|
||||
/// Returns the converged eigenvalues.
|
||||
///
|
||||
/// \return A vector containing the eigenvalues.
|
||||
/// Returned vector type will be `Eigen::Vector<Scalar, ...>`, depending on
|
||||
/// the template parameter `Scalar` defined.
|
||||
///
|
||||
Vector eigenvalues() const
|
||||
{
|
||||
const Index nconv = m_ritz_conv.cast<Index>().sum();
|
||||
Vector res(nconv);
|
||||
|
||||
if (!nconv)
|
||||
return res;
|
||||
|
||||
Index j = 0;
|
||||
for (Index i = 0; i < m_nev; i++)
|
||||
{
|
||||
if (m_ritz_conv[i])
|
||||
{
|
||||
res[j] = m_ritz_val[i];
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the eigenvectors associated with the converged eigenvalues.
|
||||
///
|
||||
/// \param nvec The number of eigenvectors to return.
|
||||
///
|
||||
/// \return A matrix containing the eigenvectors.
|
||||
/// Returned matrix type will be `Eigen::Matrix<Scalar, ...>`,
|
||||
/// depending on the template parameter `Scalar` defined.
|
||||
///
|
||||
virtual Matrix eigenvectors(Index nvec) const
|
||||
{
|
||||
const Index nconv = m_ritz_conv.cast<Index>().sum();
|
||||
nvec = std::min(nvec, nconv);
|
||||
Matrix res(m_n, nvec);
|
||||
|
||||
if (!nvec)
|
||||
return res;
|
||||
|
||||
Matrix ritz_vec_conv(m_ncv, nvec);
|
||||
Index j = 0;
|
||||
for (Index i = 0; i < m_nev && j < nvec; i++)
|
||||
{
|
||||
if (m_ritz_conv[i])
|
||||
{
|
||||
ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns all converged eigenvectors.
|
||||
///
|
||||
virtual Matrix eigenvectors() const
|
||||
{
|
||||
return eigenvectors(m_nev);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SYM_EIGS_BASE_H
|
|
@ -0,0 +1,203 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SYM_EIGS_SHIFT_SOLVER_H
|
||||
#define SYM_EIGS_SHIFT_SOLVER_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
#include "SymEigsBase.h"
|
||||
#include "Util/SelectionRule.h"
|
||||
#include "MatOp/DenseSymShiftSolve.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup EigenSolver
|
||||
///
|
||||
/// This class implements the eigen solver for real symmetric matrices using
|
||||
/// the **shift-and-invert mode**. The background information of the symmetric
|
||||
/// eigen solver is documented in the SymEigsSolver class. Here we focus on
|
||||
/// explaining the shift-and-invert mode.
|
||||
///
|
||||
/// The shift-and-invert mode is based on the following fact:
|
||||
/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of
|
||||
/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$,
|
||||
/// we have
|
||||
/// \f[(A-\sigma I)^{-1}x=\nu x\f]
|
||||
/// where
|
||||
/// \f[\nu=\frac{1}{\lambda-\sigma}\f]
|
||||
/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix
|
||||
/// \f$(A-\sigma I)^{-1}\f$.
|
||||
///
|
||||
/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$
|
||||
/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired
|
||||
/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting
|
||||
/// that \f$\lambda=\sigma+\nu^{-1}\f$.
|
||||
///
|
||||
/// The reason why we need this type of manipulation is that
|
||||
/// the algorithm of **Spectra** (and also **ARPACK**)
|
||||
/// is good at finding eigenvalues with large magnitude, but may fail in looking
|
||||
/// for eigenvalues that are close to zero. However, if we really need them, we
|
||||
/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then
|
||||
/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$
|
||||
/// imply smallest values of \f$\lambda\f$.
|
||||
///
|
||||
/// To summarize, in the shift-and-invert mode, the selection rule will apply to
|
||||
/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule
|
||||
/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of
|
||||
/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues()
|
||||
/// method will always return the eigenvalues in the original problem (i.e.,
|
||||
/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the
|
||||
/// same for both the original problem and the shifted-and-inverted problem.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
/// \tparam SelectionRule An enumeration value indicating the selection rule of
|
||||
/// the shifted-and-inverted eigenvalues.
|
||||
/// The full list of enumeration values can be found in
|
||||
/// \ref Enumerations.
|
||||
/// \tparam OpType The name of the matrix operation class. Users could either
|
||||
/// use the wrapper classes such as DenseSymShiftSolve and
|
||||
/// SparseSymShiftSolve, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseSymShiftSolve.
|
||||
///
|
||||
/// Below is an example that illustrates the use of the shift-and-invert mode:
|
||||
///
|
||||
/// \code{.cpp}
|
||||
/// #include <Eigen/Core>
|
||||
/// #include <Spectra/SymEigsShiftSolver.h>
|
||||
/// // <Spectra/MatOp/DenseSymShiftSolve.h> is implicitly included
|
||||
/// #include <iostream>
|
||||
///
|
||||
/// using namespace Spectra;
|
||||
///
|
||||
/// int main()
|
||||
/// {
|
||||
/// // A size-10 diagonal matrix with elements 1, 2, ..., 10
|
||||
/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10);
|
||||
/// for(int i = 0; i < M.rows(); i++)
|
||||
/// M(i, i) = i + 1;
|
||||
///
|
||||
/// // Construct matrix operation object using the wrapper class
|
||||
/// DenseSymShiftSolve<double> op(M);
|
||||
///
|
||||
/// // Construct eigen solver object with shift 0
|
||||
/// // This will find eigenvalues that are closest to 0
|
||||
/// SymEigsShiftSolver< double, LARGEST_MAGN,
|
||||
/// DenseSymShiftSolve<double> > eigs(&op, 3, 6, 0.0);
|
||||
///
|
||||
/// eigs.init();
|
||||
/// eigs.compute();
|
||||
/// if(eigs.info() == SUCCESSFUL)
|
||||
/// {
|
||||
/// Eigen::VectorXd evalues = eigs.eigenvalues();
|
||||
/// // Will get (3.0, 2.0, 1.0)
|
||||
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
|
||||
/// }
|
||||
///
|
||||
/// return 0;
|
||||
/// }
|
||||
/// \endcode
|
||||
///
|
||||
/// Also an example for user-supplied matrix shift-solve operation class:
|
||||
///
|
||||
/// \code{.cpp}
|
||||
/// #include <Eigen/Core>
|
||||
/// #include <Spectra/SymEigsShiftSolver.h>
|
||||
/// #include <iostream>
|
||||
///
|
||||
/// using namespace Spectra;
|
||||
///
|
||||
/// // M = diag(1, 2, ..., 10)
|
||||
/// class MyDiagonalTenShiftSolve
|
||||
/// {
|
||||
/// private:
|
||||
/// double sigma_;
|
||||
/// public:
|
||||
/// int rows() { return 10; }
|
||||
/// int cols() { return 10; }
|
||||
/// void set_shift(double sigma) { sigma_ = sigma; }
|
||||
/// // y_out = inv(A - sigma * I) * x_in
|
||||
/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...)
|
||||
/// void perform_op(double *x_in, double *y_out)
|
||||
/// {
|
||||
/// for(int i = 0; i < rows(); i++)
|
||||
/// {
|
||||
/// y_out[i] = x_in[i] / (i + 1 - sigma_);
|
||||
/// }
|
||||
/// }
|
||||
/// };
|
||||
///
|
||||
/// int main()
|
||||
/// {
|
||||
/// MyDiagonalTenShiftSolve op;
|
||||
/// // Find three eigenvalues that are closest to 3.14
|
||||
/// SymEigsShiftSolver<double, LARGEST_MAGN,
|
||||
/// MyDiagonalTenShiftSolve> eigs(&op, 3, 6, 3.14);
|
||||
/// eigs.init();
|
||||
/// eigs.compute();
|
||||
/// if(eigs.info() == SUCCESSFUL)
|
||||
/// {
|
||||
/// Eigen::VectorXd evalues = eigs.eigenvalues();
|
||||
/// // Will get (4.0, 3.0, 2.0)
|
||||
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
|
||||
/// }
|
||||
///
|
||||
/// return 0;
|
||||
/// }
|
||||
/// \endcode
|
||||
///
|
||||
template <typename Scalar = double,
|
||||
int SelectionRule = LARGEST_MAGN,
|
||||
typename OpType = DenseSymShiftSolve<double> >
|
||||
class SymEigsShiftSolver : public SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> Array;
|
||||
|
||||
const Scalar m_sigma;
|
||||
|
||||
// First transform back the Ritz values, and then sort
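// using the relation lambda = sigma + 1 / nu described above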
|
||||
void sort_ritzpair(int sort_rule)
|
||||
{
|
||||
Array m_ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma;
|
||||
this->m_ritz_val.head(this->m_nev) = m_ritz_val_org;
|
||||
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>::sort_ritzpair(sort_rule);
|
||||
}
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create an eigen solver object using the shift-and-invert mode.
|
||||
///
|
||||
/// \param op Pointer to the matrix operation object, which should implement
|
||||
/// the shift-solve operation of \f$A\f$: calculating
|
||||
/// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the wrapper class such as DenseSymShiftSolve, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseSymShiftSolve.
|
||||
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
|
||||
/// where \f$n\f$ is the size of matrix.
|
||||
/// \param ncv Parameter that controls the convergence speed of the algorithm.
|
||||
/// Typically a larger `ncv` means faster convergence, but it may
|
||||
/// also result in greater memory use and more matrix operations
|
||||
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
|
||||
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
|
||||
/// \param sigma The value of the shift.
|
||||
///
|
||||
SymEigsShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) :
|
||||
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv),
|
||||
m_sigma(sigma)
|
||||
{
|
||||
this->m_op->set_shift(m_sigma);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SYM_EIGS_SHIFT_SOLVER_H
|
|
@ -0,0 +1,171 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SYM_EIGS_SOLVER_H
|
||||
#define SYM_EIGS_SOLVER_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
#include "SymEigsBase.h"
|
||||
#include "Util/SelectionRule.h"
|
||||
#include "MatOp/DenseSymMatProd.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup EigenSolver
|
||||
///
|
||||
/// This class implements the eigen solver for real symmetric matrices, i.e.,
|
||||
/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric.
|
||||
///
|
||||
/// **Spectra** is designed to calculate a specified number (\f$k\f$)
|
||||
/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much
|
||||
/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues
|
||||
/// and eigenvectors are computed.
|
||||
///
|
||||
/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires
|
||||
/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of
|
||||
/// this solver need to supply a class that computes the result of \f$Av\f$
|
||||
/// for any given vector \f$v\f$. The name of this class should be given to
|
||||
/// the template parameter `OpType`, and an instance of this class should be passed to
|
||||
/// the constructor of SymEigsSolver.
|
||||
///
|
||||
/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**,
|
||||
/// for example `Eigen::MatrixXd`, then there is an easy way to construct such
|
||||
/// matrix operation class, by using the built-in wrapper class DenseSymMatProd
|
||||
/// which wraps an existing matrix object in **Eigen**. This is also the
|
||||
/// default template parameter for SymEigsSolver. For sparse matrices, the
|
||||
/// wrapper class SparseSymMatProd can be used similarly.
|
||||
///
|
||||
/// If the users need to define their own matrix-vector multiplication operation
|
||||
/// class, it should implement all the public member functions as in DenseSymMatProd.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
/// \tparam SelectionRule An enumeration value indicating the selection rule of
|
||||
/// the requested eigenvalues, for example `LARGEST_MAGN`
|
||||
/// to retrieve eigenvalues with the largest magnitude.
|
||||
/// The full list of enumeration values can be found in
|
||||
/// \ref Enumerations.
|
||||
/// \tparam OpType The name of the matrix operation class. Users could either
|
||||
/// use the wrapper classes such as DenseSymMatProd and
|
||||
/// SparseSymMatProd, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseSymMatProd.
|
||||
///
|
||||
/// Below is an example that demonstrates the usage of this class.
|
||||
///
|
||||
/// \code{.cpp}
|
||||
/// #include <Eigen/Core>
|
||||
/// #include <Spectra/SymEigsSolver.h>
|
||||
/// // <Spectra/MatOp/DenseSymMatProd.h> is implicitly included
|
||||
/// #include <iostream>
|
||||
///
|
||||
/// using namespace Spectra;
|
||||
///
|
||||
/// int main()
|
||||
/// {
|
||||
/// // We are going to calculate the eigenvalues of M
|
||||
/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
|
||||
/// Eigen::MatrixXd M = A + A.transpose();
|
||||
///
|
||||
/// // Construct matrix operation object using the wrapper class DenseSymMatProd
|
||||
/// DenseSymMatProd<double> op(M);
|
||||
///
|
||||
/// // Construct eigen solver object, requesting the largest three eigenvalues
|
||||
/// SymEigsSolver< double, LARGEST_ALGE, DenseSymMatProd<double> > eigs(&op, 3, 6);
|
||||
///
|
||||
/// // Initialize and compute
|
||||
/// eigs.init();
|
||||
/// int nconv = eigs.compute();
|
||||
///
|
||||
/// // Retrieve results
|
||||
/// Eigen::VectorXd evalues;
|
||||
/// if(eigs.info() == SUCCESSFUL)
|
||||
/// evalues = eigs.eigenvalues();
|
||||
///
|
||||
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
|
||||
///
|
||||
/// return 0;
|
||||
/// }
|
||||
/// \endcode
|
||||
///
|
||||
/// And here is an example for user-supplied matrix operation class.
|
||||
///
|
||||
/// \code{.cpp}
|
||||
/// #include <Eigen/Core>
|
||||
/// #include <Spectra/SymEigsSolver.h>
|
||||
/// #include <iostream>
|
||||
///
|
||||
/// using namespace Spectra;
|
||||
///
|
||||
/// // M = diag(1, 2, ..., 10)
|
||||
/// class MyDiagonalTen
|
||||
/// {
|
||||
/// public:
|
||||
/// int rows() { return 10; }
|
||||
/// int cols() { return 10; }
|
||||
/// // y_out = M * x_in
|
||||
/// void perform_op(double *x_in, double *y_out)
|
||||
/// {
|
||||
/// for(int i = 0; i < rows(); i++)
|
||||
/// {
|
||||
/// y_out[i] = x_in[i] * (i + 1);
|
||||
/// }
|
||||
/// }
|
||||
/// };
|
||||
///
|
||||
/// int main()
|
||||
/// {
|
||||
/// MyDiagonalTen op;
|
||||
/// SymEigsSolver<double, LARGEST_ALGE, MyDiagonalTen> eigs(&op, 3, 6);
|
||||
/// eigs.init();
|
||||
/// eigs.compute();
|
||||
/// if(eigs.info() == SUCCESSFUL)
|
||||
/// {
|
||||
/// Eigen::VectorXd evalues = eigs.eigenvalues();
|
||||
/// // Will get (10, 9, 8)
|
||||
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
|
||||
/// }
|
||||
///
|
||||
/// return 0;
|
||||
/// }
|
||||
/// \endcode
|
||||
///
|
||||
template <typename Scalar = double,
|
||||
int SelectionRule = LARGEST_MAGN,
|
||||
typename OpType = DenseSymMatProd<double> >
|
||||
class SymEigsSolver : public SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create a solver object.
|
||||
///
|
||||
/// \param op Pointer to the matrix operation object, which should implement
|
||||
/// the matrix-vector multiplication operation of \f$A\f$:
|
||||
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the wrapper class such as DenseSymMatProd, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseSymMatProd.
|
||||
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
|
||||
/// where \f$n\f$ is the size of matrix.
|
||||
/// \param ncv Parameter that controls the convergence speed of the algorithm.
|
||||
/// Typically a larger `ncv` means faster convergence, but it may
|
||||
/// also result in greater memory use and more matrix operations
|
||||
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
|
||||
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
|
||||
///
|
||||
SymEigsSolver(OpType* op, Index nev, Index ncv) :
|
||||
SymEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>(op, NULL, nev, ncv)
|
||||
{}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SYM_EIGS_SOLVER_H
|
|
@ -0,0 +1,326 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SYM_GEIGS_SOLVER_H
|
||||
#define SYM_GEIGS_SOLVER_H
|
||||
|
||||
#include "SymEigsBase.h"
|
||||
#include "Util/GEigsMode.h"
|
||||
#include "MatOp/internal/SymGEigsCholeskyOp.h"
|
||||
#include "MatOp/internal/SymGEigsRegInvOp.h"
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \defgroup GEigenSolver Generalized Eigen Solvers
|
||||
///
|
||||
/// Generalized eigen solvers for different types of problems.
|
||||
///
|
||||
|
||||
///
|
||||
/// \ingroup GEigenSolver
|
||||
///
|
||||
/// This class implements the generalized eigen solver for real symmetric
|
||||
/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and
|
||||
/// \f$B\f$ is positive definite.
|
||||
///
|
||||
/// There are two modes of this solver, specified by the template parameter
|
||||
/// GEigsMode. See the pages for the specialized classes for details.
|
||||
/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky
|
||||
/// decomposition, which is the preferred mode when the decomposition is
|
||||
/// available. (This can be easily done in Eigen using the dense or sparse
|
||||
/// Cholesky solver.)
|
||||
/// See \ref SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY> "SymGEigsSolver (Cholesky mode)" for more details.
|
||||
/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the
|
||||
/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be
|
||||
/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or
|
||||
/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition.
|
||||
/// See \ref SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_REGULAR_INVERSE> "SymGEigsSolver (Regular inverse mode)" for more details.
|
||||
|
||||
// Empty class template
|
||||
template <typename Scalar,
|
||||
int SelectionRule,
|
||||
typename OpType,
|
||||
typename BOpType,
|
||||
int GEigsMode>
|
||||
class SymGEigsSolver
|
||||
{};
|
||||
|
||||
///
|
||||
/// \ingroup GEigenSolver
|
||||
///
|
||||
/// This class implements the generalized eigen solver for real symmetric
|
||||
/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$
|
||||
/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky
|
||||
/// decomposition \f$B=LL'\f$.
|
||||
///
|
||||
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
|
||||
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower
|
||||
/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$.
|
||||
///
|
||||
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
|
||||
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
|
||||
/// the second operation can be created using the DenseCholesky or SparseCholesky
|
||||
/// classes. If the users need to define their own operation classes, then they
|
||||
/// should implement all the public member functions as in those built-in classes.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
/// \tparam SelectionRule An enumeration value indicating the selection rule of
|
||||
/// the requested eigenvalues, for example `LARGEST_MAGN`
|
||||
/// to retrieve eigenvalues with the largest magnitude.
|
||||
/// The full list of enumeration values can be found in
|
||||
/// \ref Enumerations.
|
||||
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
|
||||
/// use the wrapper classes such as DenseSymMatProd and
|
||||
/// SparseSymMatProd, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseSymMatProd.
|
||||
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
|
||||
/// use the wrapper classes such as DenseCholesky and
|
||||
/// SparseCholesky, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseCholesky.
|
||||
/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver
|
||||
/// it is Spectra::GEIGS_CHOLESKY.
|
||||
///
|
||||
/// Below is an example that demonstrates the usage of this class.
|
||||
///
|
||||
/// \code{.cpp}
|
||||
/// #include <Eigen/Core>
|
||||
/// #include <Eigen/SparseCore>
|
||||
/// #include <Eigen/Eigenvalues>
|
||||
/// #include <Spectra/SymGEigsSolver.h>
|
||||
/// #include <Spectra/MatOp/DenseSymMatProd.h>
|
||||
/// #include <Spectra/MatOp/SparseCholesky.h>
|
||||
/// #include <iostream>
|
||||
///
|
||||
/// using namespace Spectra;
|
||||
///
|
||||
/// int main()
|
||||
/// {
|
||||
/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x
|
||||
/// const int n = 100;
|
||||
///
|
||||
/// // Define the A matrix
|
||||
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
|
||||
/// Eigen::MatrixXd A = M + M.transpose();
|
||||
///
|
||||
/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals
|
||||
/// Eigen::SparseMatrix<double> B(n, n);
|
||||
/// B.reserve(Eigen::VectorXi::Constant(n, 3));
|
||||
/// for(int i = 0; i < n; i++)
|
||||
/// {
|
||||
/// B.insert(i, i) = 2.0;
|
||||
/// if(i > 0)
|
||||
/// B.insert(i - 1, i) = 1.0;
|
||||
/// if(i < n - 1)
|
||||
/// B.insert(i + 1, i) = 1.0;
|
||||
/// }
|
||||
///
|
||||
/// // Construct matrix operation object using the wrapper classes
|
||||
/// DenseSymMatProd<double> op(A);
|
||||
/// SparseCholesky<double> Bop(B);
|
||||
///
|
||||
/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues
|
||||
/// SymGEigsSolver<double, LARGEST_ALGE, DenseSymMatProd<double>, SparseCholesky<double>, GEIGS_CHOLESKY>
|
||||
/// geigs(&op, &Bop, 3, 6);
|
||||
///
|
||||
/// // Initialize and compute
|
||||
/// geigs.init();
|
||||
/// int nconv = geigs.compute();
|
||||
///
|
||||
/// // Retrieve results
|
||||
/// Eigen::VectorXd evalues;
|
||||
/// Eigen::MatrixXd evecs;
|
||||
/// if(geigs.info() == SUCCESSFUL)
|
||||
/// {
|
||||
/// evalues = geigs.eigenvalues();
|
||||
/// evecs = geigs.eigenvectors();
|
||||
/// }
|
||||
///
|
||||
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
|
||||
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
|
||||
///
|
||||
/// // Verify results using the generalized eigen solver in Eigen
|
||||
/// Eigen::MatrixXd Bdense = B;
|
||||
/// Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, Bdense);
|
||||
///
|
||||
/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl;
|
||||
/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl;
|
||||
///
|
||||
/// return 0;
|
||||
/// }
|
||||
/// \endcode
|
||||
|
||||
// Partial specialization for GEigsMode = GEIGS_CHOLESKY
|
||||
template <typename Scalar,
|
||||
int SelectionRule,
|
||||
typename OpType,
|
||||
typename BOpType>
|
||||
class SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY> :
|
||||
public SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
BOpType* m_Bop;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create a solver object.
|
||||
///
|
||||
/// \param op Pointer to the \f$A\f$ matrix operation object. It
|
||||
/// should implement the matrix-vector multiplication operation of \f$A\f$:
|
||||
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the wrapper classes such as DenseSymMatProd, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseSymMatProd.
|
||||
/// \param Bop Pointer to the \f$B\f$ matrix operation object. It
|
||||
/// represents a Cholesky decomposition of \f$B\f$, and should
|
||||
/// implement the lower and upper triangular solving operations:
|
||||
/// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector
|
||||
/// \f$v\f$, where \f$LL'=B\f$. Users could either
|
||||
/// create the object from the wrapper classes such as DenseCholesky, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseCholesky.
|
||||
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
|
||||
/// where \f$n\f$ is the size of matrix.
|
||||
/// \param ncv Parameter that controls the convergence speed of the algorithm.
|
||||
/// Typically a larger `ncv` means faster convergence, but it may
|
||||
/// also result in greater memory use and more matrix operations
|
||||
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
|
||||
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
|
||||
///
|
||||
SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) :
|
||||
SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>(
|
||||
new SymGEigsCholeskyOp<Scalar, OpType, BOpType>(*op, *Bop), NULL, nev, ncv),
|
||||
m_Bop(Bop)
|
||||
{}
|
||||
|
||||
/// \cond
|
||||
|
||||
~SymGEigsSolver()
|
||||
{
|
||||
// m_op contains the constructed SymGEigsCholeskyOp object
|
||||
delete this->m_op;
|
||||
}
|
||||
|
||||
Matrix eigenvectors(Index nvec) const
|
||||
{
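// The Cholesky mode solves the standard problem
// L^{-1} A (L')^{-1} y = lambda * y with y = L' x, so the eigenvectors of
// the original problem are recovered by solving L' x = y for each column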
|
||||
Matrix res = SymEigsBase<Scalar, SelectionRule, SymGEigsCholeskyOp<Scalar, OpType, BOpType>, IdentityBOp>::eigenvectors(nvec);
|
||||
Vector tmp(res.rows());
|
||||
const Index nconv = res.cols();
|
||||
for (Index i = 0; i < nconv; i++)
|
||||
{
|
||||
m_Bop->upper_triangular_solve(&res(0, i), tmp.data());
|
||||
res.col(i).noalias() = tmp;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
Matrix eigenvectors() const
|
||||
{
|
||||
return SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_CHOLESKY>::eigenvectors(this->m_nev);
|
||||
}
|
||||
|
||||
/// \endcond
|
||||
};
|
||||
|
||||
///
|
||||
/// \ingroup GEigenSolver
|
||||
///
|
||||
/// This class implements the generalized eigen solver for real symmetric
|
||||
/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$
|
||||
/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations
|
||||
/// defined below.
|
||||
///
|
||||
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
|
||||
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the
|
||||
/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$.
|
||||
///
|
||||
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
|
||||
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
|
||||
/// the second operation can be created using the SparseRegularInverse class. There is no
|
||||
/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode
|
||||
/// is always preferred. If the users need to define their own operation classes, then they
|
||||
/// should implement all the public member functions as in those built-in classes.
|
||||
///
|
||||
/// \tparam Scalar The element type of the matrix.
|
||||
/// Currently supported types are `float`, `double` and `long double`.
|
||||
/// \tparam SelectionRule An enumeration value indicating the selection rule of
|
||||
/// the requested eigenvalues, for example `LARGEST_MAGN`
|
||||
/// to retrieve eigenvalues with the largest magnitude.
|
||||
/// The full list of enumeration values can be found in
|
||||
/// \ref Enumerations.
|
||||
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
|
||||
/// use the wrapper classes such as DenseSymMatProd and
|
||||
/// SparseSymMatProd, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// DenseSymMatProd.
|
||||
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
|
||||
/// use the wrapper class SparseRegularInverse, or define their
|
||||
/// own that implements all the public member functions as in
|
||||
/// SparseRegularInverse.
|
||||
/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver
|
||||
/// it is Spectra::GEIGS_REGULAR_INVERSE.
|
||||
///
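///
/// Below is a usage sketch of this mode. It mirrors the Cholesky-mode example
/// above, with \f$A\f$ and \f$B\f$ taken to be simple sparse band matrices and
/// \f$B\f$ accessed only through the products \f$Bv\f$ and solves \f$B^{-1}v\f$
/// provided by the SparseRegularInverse wrapper:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymGEigsSolver.h>
/// #include <Spectra/MatOp/SparseSymMatProd.h>
/// #include <Spectra/MatOp/SparseRegularInverse.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
///     // We solve A * x = lambda * B * x with sparse A and B
///     // A: band matrix with 3 on the diagonal and 1 on the subdiagonals
///     // B: band matrix with 2 on the diagonal and 1 on the subdiagonals
///     const int n = 100;
///     Eigen::SparseMatrix<double> A(n, n), B(n, n);
///     for(int i = 0; i < n; i++)
///     {
///         A.insert(i, i) = 3.0;
///         B.insert(i, i) = 2.0;
///         if(i > 0)     { A.insert(i - 1, i) = 1.0; B.insert(i - 1, i) = 1.0; }
///         if(i < n - 1) { A.insert(i + 1, i) = 1.0; B.insert(i + 1, i) = 1.0; }
///     }
///     A.makeCompressed();
///     B.makeCompressed();
///
///     // Matrix operation objects: A is accessed through products A * v,
///     // and B through products B * v and solves B^{-1} * v
///     SparseSymMatProd<double> op(A);
///     SparseRegularInverse<double> Bop(B);
///
///     // Construct the solver in the regular inverse mode, requesting the
///     // largest three generalized eigenvalues
///     SymGEigsSolver<double, LARGEST_ALGE, SparseSymMatProd<double>,
///                    SparseRegularInverse<double>, GEIGS_REGULAR_INVERSE>
///         geigs(&op, &Bop, 3, 6);
///
///     // Initialize, compute, and retrieve results
///     geigs.init();
///     geigs.compute();
///     if(geigs.info() == SUCCESSFUL)
///         std::cout << "Eigenvalues:\n" << geigs.eigenvalues() << std::endl;
///
///     return 0;
/// }
/// \endcode
///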
|
||||
|
||||
// Partial specialization for GEigsMode = GEIGS_REGULAR_INVERSE
|
||||
template <typename Scalar,
|
||||
int SelectionRule,
|
||||
typename OpType,
|
||||
typename BOpType>
|
||||
class SymGEigsSolver<Scalar, SelectionRule, OpType, BOpType, GEIGS_REGULAR_INVERSE> :
|
||||
public SymEigsBase<Scalar, SelectionRule, SymGEigsRegInvOp<Scalar, OpType, BOpType>, BOpType>
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
|
||||
public:
|
||||
///
|
||||
/// Constructor to create a solver object.
|
||||
///
|
||||
/// \param op Pointer to the \f$A\f$ matrix operation object. It
|
||||
/// should implement the matrix-vector multiplication operation of \f$A\f$:
|
||||
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the wrapper classes such as DenseSymMatProd, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in DenseSymMatProd.
|
||||
/// \param Bop Pointer to the \f$B\f$ matrix operation object. It should
|
||||
/// implement the multiplication operation \f$Bv\f$ and the linear equation
|
||||
/// solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. Users could either
|
||||
/// create the object from the wrapper class SparseRegularInverse, or
|
||||
/// define their own that implements all the public member functions
|
||||
/// as in SparseRegularInverse.
|
||||
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
|
||||
/// where \f$n\f$ is the size of matrix.
|
||||
/// \param ncv Parameter that controls the convergence speed of the algorithm.
|
||||
/// Typically a larger `ncv` means faster convergence, but it may
|
||||
/// also result in greater memory use and more matrix operations
|
||||
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
|
||||
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
|
||||
///
|
||||
SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) :
|
||||
SymEigsBase<Scalar, SelectionRule, SymGEigsRegInvOp<Scalar, OpType, BOpType>, BOpType>(
|
||||
new SymGEigsRegInvOp<Scalar, OpType, BOpType>(*op, *Bop), Bop, nev, ncv)
|
||||
{}
|
||||
|
||||
/// \cond
|
||||
~SymGEigsSolver()
|
||||
{
|
||||
// m_op contains the constructed SymGEigsRegInvOp object
|
||||
delete this->m_op;
|
||||
}
|
||||
/// \endcond
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SYM_GEIGS_SOLVER_H
|
|
@ -0,0 +1,35 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef COMP_INFO_H
|
||||
#define COMP_INFO_H
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup Enumerations
|
||||
///
|
||||
/// The enumeration to report the status of computation.
|
||||
///
|
||||
enum COMPUTATION_INFO
|
||||
{
|
||||
SUCCESSFUL = 0, ///< Computation was successful.
|
||||
|
||||
NOT_COMPUTED, ///< Used in eigen solvers, indicating that computation
|
||||
///< has not been conducted. Users should call
|
||||
///< the `compute()` member function of solvers.
|
||||
|
||||
NOT_CONVERGING, ///< Used in eigen solvers, indicating that some eigenvalues
|
||||
///< did not converge. The `compute()`
|
||||
///< function returns the number of converged eigenvalues.
|
||||
|
||||
NUMERICAL_ISSUE ///< Used in Cholesky decomposition, indicating that the
|
||||
///< matrix is not positive definite.
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // COMP_INFO_H
|
|
@ -0,0 +1,32 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef GEIGS_MODE_H
|
||||
#define GEIGS_MODE_H
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \ingroup Enumerations
|
||||
///
|
||||
/// The enumeration to specify the mode of generalized eigenvalue solver.
|
||||
///
|
||||
enum GEIGS_MODE
|
||||
{
|
||||
GEIGS_CHOLESKY = 0, ///< Using Cholesky decomposition to solve generalized eigenvalues.
|
||||
|
||||
GEIGS_REGULAR_INVERSE, ///< Regular inverse mode for generalized eigenvalue solver.
|
||||
|
||||
GEIGS_SHIFT_INVERT, ///< Shift-and-invert mode for generalized eigenvalue solver.
|
||||
|
||||
GEIGS_BUCKLING, ///< Buckling mode for generalized eigenvalue solver.
|
||||
|
||||
GEIGS_CAYLEY ///< Cayley transformation mode for generalized eigenvalue solver.
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // GEIGS_MODE_H
|
|
@ -0,0 +1,275 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SELECTION_RULE_H
|
||||
#define SELECTION_RULE_H
|
||||
|
||||
#include <vector> // std::vector
|
||||
#include <cmath> // std::abs
|
||||
#include <algorithm> // std::sort
|
||||
#include <complex> // std::complex
|
||||
#include <utility> // std::pair
|
||||
#include <stdexcept> // std::invalid_argument
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
///
|
||||
/// \defgroup Enumerations
|
||||
///
|
||||
/// Enumeration types for the selection rule of eigenvalues.
|
||||
///
|
||||
|
||||
///
|
||||
/// \ingroup Enumerations
|
||||
///
|
||||
/// The enumeration of selection rules of desired eigenvalues.
|
||||
///
|
||||
enum SELECT_EIGENVALUE
|
||||
{
|
||||
LARGEST_MAGN = 0, ///< Select eigenvalues with largest magnitude. Magnitude
|
||||
///< means the absolute value for real numbers and norm for
|
||||
///< complex numbers. Applies to both symmetric and general
|
||||
///< eigen solvers.
|
||||
|
||||
LARGEST_REAL, ///< Select eigenvalues with largest real part. Only for general eigen solvers.
|
||||
|
||||
LARGEST_IMAG, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers.
|
||||
|
||||
LARGEST_ALGE, ///< Select eigenvalues with largest algebraic value, considering
|
||||
///< any negative sign. Only for symmetric eigen solvers.
|
||||
|
||||
SMALLEST_MAGN, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general
|
||||
///< eigen solvers.
|
||||
|
||||
SMALLEST_REAL, ///< Select eigenvalues with smallest real part. Only for general eigen solvers.
|
||||
|
||||
SMALLEST_IMAG, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers.
|
||||
|
||||
SMALLEST_ALGE, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers.
|
||||
|
||||
BOTH_ENDS ///< Select eigenvalues half from each end of the spectrum. When
|
||||
///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers.
|
||||
};
|
||||
|
||||
///
|
||||
/// \ingroup Enumerations
|
||||
///
|
||||
/// The enumeration of selection rules of desired eigenvalues. Alias for `SELECT_EIGENVALUE`.
|
||||
///
|
||||
enum SELECT_EIGENVALUE_ALIAS
|
||||
{
|
||||
WHICH_LM = 0, ///< Alias for `LARGEST_MAGN`
|
||||
WHICH_LR, ///< Alias for `LARGEST_REAL`
|
||||
WHICH_LI, ///< Alias for `LARGEST_IMAG`
|
||||
WHICH_LA, ///< Alias for `LARGEST_ALGE`
|
||||
WHICH_SM, ///< Alias for `SMALLEST_MAGN`
|
||||
WHICH_SR, ///< Alias for `SMALLEST_REAL`
|
||||
WHICH_SI, ///< Alias for `SMALLEST_IMAG`
|
||||
WHICH_SA, ///< Alias for `SMALLEST_ALGE`
|
||||
WHICH_BE ///< Alias for `BOTH_ENDS`
|
||||
};
|
||||
|
||||
/// \cond
|
||||
|
||||
// Get the element type of a "scalar"
|
||||
// ElemType<double> => double
|
||||
// ElemType< std::complex<double> > => double
|
||||
template <typename T>
|
||||
class ElemType
|
||||
{
|
||||
public:
|
||||
typedef T type;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class ElemType<std::complex<T> >
|
||||
{
|
||||
public:
|
||||
typedef T type;
|
||||
};
|
||||
|
||||
// When comparing eigenvalues, we first calculate the "target"
|
||||
// to sort. For example, if we want to choose the eigenvalues with
|
||||
// largest magnitude, the target will be -abs(x).
|
||||
// The minus sign is due to the fact that std::sort() sorts in ascending order.
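// For example, with values {1.0, -3.0, 2.0} and rule LARGEST_MAGN, the
// targets are {-1, -3, -2}, so the sorted index sequence is {1, 2, 0},
// i.e. the values ordered by decreasing magnitude: -3.0, 2.0, 1.0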
|
||||
|
||||
// Default target: throw an exception
|
||||
template <typename Scalar, int SelectionRule>
|
||||
class SortingTarget
|
||||
{
|
||||
public:
|
||||
static typename ElemType<Scalar>::type get(const Scalar& val)
|
||||
{
|
||||
using std::abs;
|
||||
throw std::invalid_argument("incompatible selection rule");
|
||||
return -abs(val);
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for LARGEST_MAGN
|
||||
// This covers [float, double, complex] x [LARGEST_MAGN]
|
||||
template <typename Scalar>
|
||||
class SortingTarget<Scalar, LARGEST_MAGN>
|
||||
{
|
||||
public:
|
||||
static typename ElemType<Scalar>::type get(const Scalar& val)
|
||||
{
|
||||
using std::abs;
|
||||
return -abs(val);
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for LARGEST_REAL
|
||||
// This covers [complex] x [LARGEST_REAL]
|
||||
template <typename RealType>
|
||||
class SortingTarget<std::complex<RealType>, LARGEST_REAL>
|
||||
{
|
||||
public:
|
||||
static RealType get(const std::complex<RealType>& val)
|
||||
{
|
||||
return -val.real();
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for LARGEST_IMAG
|
||||
// This covers [complex] x [LARGEST_IMAG]
|
||||
template <typename RealType>
|
||||
class SortingTarget<std::complex<RealType>, LARGEST_IMAG>
|
||||
{
|
||||
public:
|
||||
static RealType get(const std::complex<RealType>& val)
|
||||
{
|
||||
using std::abs;
|
||||
return -abs(val.imag());
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for LARGEST_ALGE
|
||||
// This covers [float, double] x [LARGEST_ALGE]
|
||||
template <typename Scalar>
|
||||
class SortingTarget<Scalar, LARGEST_ALGE>
|
||||
{
|
||||
public:
|
||||
static Scalar get(const Scalar& val)
|
||||
{
|
||||
return -val;
|
||||
}
|
||||
};
|
||||
|
||||
// Here BOTH_ENDS is the same as LARGEST_ALGE, but
|
||||
// we need some additional steps, which are done in
|
||||
// SymEigsSolver.h => retrieve_ritzpair().
|
||||
// There we move the smallest values to the proper locations.
|
||||
template <typename Scalar>
|
||||
class SortingTarget<Scalar, BOTH_ENDS>
|
||||
{
|
||||
public:
|
||||
static Scalar get(const Scalar& val)
|
||||
{
|
||||
return -val;
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for SMALLEST_MAGN
|
||||
// This covers [float, double, complex] x [SMALLEST_MAGN]
|
||||
template <typename Scalar>
|
||||
class SortingTarget<Scalar, SMALLEST_MAGN>
|
||||
{
|
||||
public:
|
||||
static typename ElemType<Scalar>::type get(const Scalar& val)
|
||||
{
|
||||
using std::abs;
|
||||
return abs(val);
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for SMALLEST_REAL
|
||||
// This covers [complex] x [SMALLEST_REAL]
|
||||
template <typename RealType>
|
||||
class SortingTarget<std::complex<RealType>, SMALLEST_REAL>
|
||||
{
|
||||
public:
|
||||
static RealType get(const std::complex<RealType>& val)
|
||||
{
|
||||
return val.real();
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for SMALLEST_IMAG
|
||||
// This covers [complex] x [SMALLEST_IMAG]
|
||||
template <typename RealType>
|
||||
class SortingTarget<std::complex<RealType>, SMALLEST_IMAG>
|
||||
{
|
||||
public:
|
||||
static RealType get(const std::complex<RealType>& val)
|
||||
{
|
||||
using std::abs;
|
||||
return abs(val.imag());
|
||||
}
|
||||
};
|
||||
|
||||
// Specialization for SMALLEST_ALGE
|
||||
// This covers [float, double] x [SMALLEST_ALGE]
|
||||
template <typename Scalar>
|
||||
class SortingTarget<Scalar, SMALLEST_ALGE>
|
||||
{
|
||||
public:
|
||||
static Scalar get(const Scalar& val)
|
||||
{
|
||||
return val;
|
||||
}
|
||||
};
|
||||
|
||||
// Sort eigenvalues and return the order index
|
||||
template <typename PairType>
|
||||
class PairComparator
|
||||
{
|
||||
public:
|
||||
bool operator()(const PairType& v1, const PairType& v2)
|
||||
{
|
||||
return v1.first < v2.first;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T, int SelectionRule>
|
||||
class SortEigenvalue
|
||||
{
|
||||
private:
|
||||
typedef typename ElemType<T>::type TargetType; // Type of the sorting target, will be
|
||||
// a floating number type, e.g. "double"
|
||||
typedef std::pair<TargetType, int> PairType; // Type of the sorting pair, including
|
||||
// the sorting target and the index
|
||||
|
||||
std::vector<PairType> pair_sort;
|
||||
|
||||
public:
|
||||
SortEigenvalue(const T* start, int size) :
|
||||
pair_sort(size)
|
||||
{
|
||||
for (int i = 0; i < size; i++)
|
||||
{
|
||||
pair_sort[i].first = SortingTarget<T, SelectionRule>::get(start[i]);
|
||||
pair_sort[i].second = i;
|
||||
}
|
||||
PairComparator<PairType> comp;
|
||||
std::sort(pair_sort.begin(), pair_sort.end(), comp);
|
||||
}
|
||||
|
||||
std::vector<int> index()
|
||||
{
|
||||
std::vector<int> ind(pair_sort.size());
|
||||
for (unsigned int i = 0; i < ind.size(); i++)
|
||||
ind[i] = pair_sort[i].second;
|
||||
|
||||
return ind;
|
||||
}
|
||||
};
|
||||
|
||||
/// \endcond
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
#endif // SELECTION_RULE_H
|
|
@ -0,0 +1,91 @@
|
|||
// Copyright (C) 2016-2019 Yixuan Qiu <yixuan.qiu@cos.name>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef SIMPLE_RANDOM_H
|
||||
#define SIMPLE_RANDOM_H
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
/// \cond
|
||||
|
||||
namespace Spectra {
|
||||
|
||||
// We need a simple pseudo random number generator here:
|
||||
// 1. It is used to generate the initial and restarted residual vectors.
|
||||
// 2. It does not need to be particularly "random" or advanced. All we hope
|
||||
//    is that the residual vector does not lie in the
|
||||
//    current Krylov subspace. This should be met almost surely.
|
||||
// 3. We don't want to call RNG in C++, since we actually want the
|
||||
// algorithm to be deterministic. Also, calling RNG in C/C++ is not
|
||||
// allowed in R packages submitted to CRAN.
|
||||
// 4. The method should be as simple as possible, so an LCG is enough.
|
||||
// 5. Based on public domain code by Ray Gardner
|
||||
// http://stjarnhimlen.se/snippets/rg_rand.c
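//
// The recurrence implemented in next_long_rand() below is
//     x_{k+1} = (16807 * x_k) mod (2^31 - 1),
// i.e. the classic Lehmer "minimal standard" generator, with the
// multiplication carried out in 16-bit halves to avoid integer overflow.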
|
||||
|
||||
template <typename Scalar = double>
|
||||
class SimpleRandom
|
||||
{
|
||||
private:
|
||||
typedef Eigen::Index Index;
|
||||
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
|
||||
|
||||
const unsigned int m_a; // multiplier
|
||||
const unsigned long m_max; // 2^31 - 1
|
||||
long m_rand;
|
||||
|
||||
inline long next_long_rand(long seed)
|
||||
{
|
||||
unsigned long lo, hi;
|
||||
|
||||
lo = m_a * (long) (seed & 0xFFFF);
|
||||
hi = m_a * (long) ((unsigned long) seed >> 16);
|
||||
lo += (hi & 0x7FFF) << 16;
|
||||
if (lo > m_max)
|
||||
{
|
||||
lo &= m_max;
|
||||
++lo;
|
||||
}
|
||||
lo += hi >> 15;
|
||||
if (lo > m_max)
|
||||
{
|
||||
lo &= m_max;
|
||||
++lo;
|
||||
}
|
||||
return (long) lo;
|
||||
}
|
||||
|
||||
public:
|
||||
SimpleRandom(unsigned long init_seed) :
|
||||
m_a(16807),
|
||||
m_max(2147483647L),
|
||||
m_rand(init_seed ? (init_seed & m_max) : 1)
|
||||
{}
|
||||
|
||||
Scalar random()
|
||||
{
|
||||
m_rand = next_long_rand(m_rand);
|
||||
return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
|
||||
}
|
||||
|
||||
// Vector of random numbers of type Scalar
|
||||
// Ranging from -0.5 to 0.5
|
||||
Vector random_vec(const Index len)
|
||||
{
|
||||
Vector res(len);
|
||||
for (Index i = 0; i < len; i++)
|
||||
{
|
||||
m_rand = next_long_rand(m_rand);
|
||||
res[i] = Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Spectra
|
||||
|
||||
/// \endcond
|
||||
|
||||
#endif // SIMPLE_RANDOM_H
|
|
@ -0,0 +1,71 @@
// Copyright (C) 2018-2019 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.

#ifndef TYPE_TRAITS_H
#define TYPE_TRAITS_H

#include <Eigen/Core>
#include <limits>

/// \cond

namespace Spectra {

// For a real value type "Scalar", we want to know its smallest
// positive value, i.e., std::numeric_limits<Scalar>::min().
// However, we must take non-standard value types into account,
// so we rely on Eigen::NumTraits.
//
// Eigen::NumTraits defines epsilon() and lowest(), but lowest()
// means -highest(), i.e., a negative value of very large magnitude,
// not the smallest positive value we need.
//
// Therefore, we manually define this limit, and use epsilon()^3
// to mimic it for non-standard types.

// Generic definition
template <typename Scalar>
struct TypeTraits
{
    static inline Scalar min()
    {
        return Eigen::numext::pow(Eigen::NumTraits<Scalar>::epsilon(), Scalar(3));
    }
};

// Full specialization
template <>
struct TypeTraits<float>
{
    static inline float min()
    {
        return std::numeric_limits<float>::min();
    }
};

template <>
struct TypeTraits<double>
{
    static inline double min()
    {
        return std::numeric_limits<double>::min();
    }
};

template <>
struct TypeTraits<long double>
{
    static inline long double min()
    {
        return std::numeric_limits<long double>::min();
    }
};

} // namespace Spectra

/// \endcond

#endif // TYPE_TRAITS_H
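A brief sketch of how such a floor is typically used as an underflow guard (the helper below is illustrative, not part of this commit, and the include path assumes the vendored Spectra/ header layout):

    #include <cmath>
    #include <Spectra/Util/TypeTraits.h>   // assumed include path

    // Treat any magnitude below the type's positive floor as "zero".
    template <typename Scalar>
    bool is_effectively_zero(Scalar x)
    {
        using std::abs;
        return abs(x) < Spectra::TypeTraits<Scalar>::min();
    }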
@ -0,0 +1,552 @@
// Written by Anna Araslanova
// Modified by Yixuan Qiu
// License: MIT

#ifndef LOBPCG_SOLVER
#define LOBPCG_SOLVER

#include <functional>
#include <map>
#include <vector>     // std::vector
#include <algorithm>  // std::find, std::min
#include <complex>    // std::complex
#include <cmath>      // std::sqrt
#include <stdexcept>  // std::invalid_argument

#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/Eigenvalues>
#include <Eigen/SVD>
#include <Eigen/SparseCholesky>

#include "../SymGEigsSolver.h"

namespace Spectra {

///
/// \ingroup EigenSolver
///

/// *** METHOD
/// This class implements the LOBPCG algorithm, invented by Andrew Knyazev.
/// The theoretical background of the procedure can be found in the articles below:
/// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver: Locally optimal block preconditioned conjugate gradient method. SIAM Journal on Scientific Computing, 23(2), pp. 517-541.
/// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. Block locally optimal preconditioned eigenvalue solvers (BLOPEX) in HYPRE and PETSc. SIAM Journal on Scientific Computing, 29(5), pp. 2224-2239.
///
/// *** CONDITIONS OF USE
/// Locally Optimal Block Preconditioned Conjugate Gradient (LOBPCG) is a method for finding the M smallest eigenvalues
/// and eigenvectors of the large symmetric positive-definite generalized eigenvalue problem
/// \f$Ax=\lambda Bx,\f$
/// where \f$A_{N \times N}\f$ is a symmetric matrix and \f$B\f$ is symmetric and positive-definite. \f$A\f$ and \f$B\f$ are also assumed to be large and sparse.
/// \f$M\f$ should satisfy \f$M \ll N\f$ (at least \f$5M < N\f$).
///
/// *** ARGUMENTS
/// Eigen::SparseMatrix<long double> A;   // N*N - Ax = lambda*Bx, large and sparse
/// Eigen::SparseMatrix<long double> X;   // N*M - initial approximations to eigenvectors (random in the general case)
/// Spectra::LOBPCGSolver<long double> solver(A, X);
/// *Eigen::SparseMatrix<long double> B;  // N*N - Ax = lambda*Bx, sparse, positive-definite
/// solver.setB(B);
/// *Eigen::SparseMatrix<long double> Y;  // N*K - constraints, already found eigenvectors
/// solver.setConstraints(Y);
/// *Eigen::SparseMatrix<long double> T;  // N*N - preconditioner ~ A^-1
/// solver.setPreconditioner(T);
///
/// *** OUTCOMES
/// solver.compute();       // compute eigenpairs     // void
/// solver.info();          // state of convergence   // int
/// solver.residuals();     // get residuals to evaluate biases  // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
/// solver.eigenvalues();   // get eigenvalues        // Eigen::Matrix<Scalar, Eigen::Dynamic, 1>
/// solver.eigenvectors();  // get eigenvectors       // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
///
/// *** EXAMPLE
/// \code{.cpp}
/// #include <Spectra/contrib/SymSparseEigsSolverLOBPCG.h>
///
/// // random A
/// Matrix a;
/// a = (Matrix::Random(10, 10).array() > 0.6).cast<long double>() * Matrix::Random(10, 10).array() * 5;
/// a = Matrix((a).triangularView<Eigen::Lower>()) + Matrix((a).triangularView<Eigen::Lower>()).transpose();
/// for (int i = 0; i < 10; i++)
///     a(i, i) = i + 0.5;
/// std::cout << a << "\n";
/// Eigen::SparseMatrix<long double> A(a.sparseView());
/// // random X
/// Eigen::Matrix<long double, 10, 2> x;
/// x = Matrix::Random(10, 2).array();
/// Eigen::SparseMatrix<long double> X(x.sparseView());
/// // solve Ax = lambda*x
/// Spectra::LOBPCGSolver<long double> solver(A, X);
/// solver.compute(10, 1e-4);  // 10 iterations, L2_tolerance = 1e-4*N
/// std::cout << "info\n" << solver.info() << std::endl;
/// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl;
/// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl;
/// std::cout << "residuals\n" << solver.residuals() << std::endl;
/// \endcode
///
template <typename Scalar = long double>
class LOBPCGSolver
{
private:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    typedef std::complex<Scalar> Complex;
    typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix;
    typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector;

    typedef Eigen::SparseMatrix<Scalar> SparseMatrix;
    typedef Eigen::SparseMatrix<Complex> SparseComplexMatrix;

    const int m_n;    // dimension of matrix A
    const int m_nev;  // number of eigenvalues requested
    SparseMatrix A, X;
    SparseMatrix m_Y, m_B, m_preconditioner;
    bool flag_with_constraints, flag_with_B, flag_with_preconditioner;

public:
    SparseMatrix m_residuals;
    Matrix m_evectors;
    Vector m_evalues;
    int m_info;

private:
    // B-orthonormalize matrix M
    int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B,
                             SparseMatrix& true_BM, bool has_true_BM = false)
    {
        SparseMatrix BM;

        if (has_true_BM == false)
        {
            if (flag_with_B)
            {
                BM = B * M;
            }
            else
            {
                BM = M;
            }
        }
        else
        {
            BM = true_BM;
        }

        Eigen::SimplicialLDLT<SparseMatrix> chol_MBM(M.transpose() * BM);

        if (chol_MBM.info() != SUCCESSFUL)
        {
            // LDLT decomposition failed
            m_info = chol_MBM.info();
            return chol_MBM.info();
        }

        SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast<Complex>();
        ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast<Complex>();

        D_MBM_vec = D_MBM_vec.cwiseSqrt();

        for (int i = 0; i < D_MBM_vec.rows(); i++)
        {
            D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i);
        }

        SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal());

        SparseComplexMatrix U_inv(Upper_MBM.rows(), Upper_MBM.cols());
        U_inv.setIdentity();
        Upper_MBM.template triangularView<Eigen::Upper>().solveInPlace(U_inv);

        SparseComplexMatrix right_product = U_inv * D_MBM_mat;
        M = M * right_product.real();
        if (flag_with_B)
        {
            true_BM = B * M;
        }
        else
        {
            true_BM = M;
        }

        return SUCCESSFUL;
    }

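    // Sanity-check sketch (illustrative only): after a successful call, M has
    // been post-multiplied so that M^T * B * M is approximately the identity
    // (or M^T * M when no B matrix has been set), e.g.
    //
    //     SparseMatrix gram = SparseMatrix(M.transpose() * B * M);
    //     SparseMatrix ident(gram.rows(), gram.cols());
    //     ident.setIdentity();
    //     Scalar dev = (gram - ident).norm();   // expected to be close to zero
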
    // Make X B-orthogonal to the constraint vectors Y:
    // X <- X - Y * (Y^T B Y)^{-1} * (B Y)^T * X
    void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y,
                                 SparseMatrix& B)
    {
        SparseMatrix BY;
        if (flag_with_B)
        {
            BY = B * Y;
        }
        else
        {
            BY = Y;
        }

        SparseMatrix YBY = Y.transpose() * BY;
        SparseMatrix BYX = BY.transpose() * X;

        SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView();
        X = X - Y * YBY_XYX;
    }

    /*
        Stack four blocks into
            [ A  B ]
            [ C  D ]
    */
    Matrix stack_4_matricies(Matrix A, Matrix B,
                             Matrix C, Matrix D)
    {
        Matrix result(A.rows() + C.rows(), A.cols() + B.cols());
        result.topLeftCorner(A.rows(), A.cols()) = A;
        result.topRightCorner(B.rows(), B.cols()) = B;
        result.bottomLeftCorner(C.rows(), C.cols()) = C;
        result.bottomRightCorner(D.rows(), D.cols()) = D;
        return result;
    }

    // Stack nine blocks into the 3 x 3 block matrix [A B C; D E F; G H I]
    Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C,
                             Matrix D, Matrix E, Matrix F,
                             Matrix G, Matrix H, Matrix I)
    {
        Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols());
        result.block(0, 0, A.rows(), A.cols()) = A;
        result.block(0, A.cols(), B.rows(), B.cols()) = B;
        result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C;
        result.block(A.rows(), 0, D.rows(), D.cols()) = D;
        result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E;
        result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F;
        result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G;
        result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H;
        result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I;

        return result;
    }

    // Sort eigenvalues (and the corresponding eigenvectors) in ascending or
    // descending order according to the selection rule
    void sort_epairs(Vector& evalues, Matrix& evectors, int SelectionRule)
    {
        std::function<bool(Scalar, Scalar)> cmp;
        if (SelectionRule == SMALLEST_ALGE)
            cmp = std::less<Scalar>{};
        else
            cmp = std::greater<Scalar>{};

        std::map<Scalar, Vector, decltype(cmp)> epairs(cmp);
        for (int i = 0; i < m_evectors.cols(); ++i)
            epairs.insert(std::make_pair(evalues(i), evectors.col(i)));

        int i = 0;
        for (auto& epair : epairs)
        {
            evectors.col(i) = epair.second;
            evalues(i) = epair.first;
            i++;
        }
    }

    // Remove the listed columns through matrix multiplication
    void removeColumns(SparseMatrix& matrix, std::vector<int>& colToRemove)
    {
        SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size()));
        int iCol = 0;
        std::vector<Eigen::Triplet<Scalar>> tripletList;
        tripletList.reserve(matrix.cols() - int(colToRemove.size()));

        for (int iRow = 0; iRow < matrix.cols(); iRow++)
        {
            if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end())
            {
                tripletList.push_back(Eigen::Triplet<Scalar>(iRow, iCol, 1));
                iCol++;
            }
        }

        new_matrix.setFromTriplets(tripletList.begin(), tripletList.end());
        matrix = matrix * new_matrix;
    }

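    // Illustrative note: removeColumns() builds a column-selection matrix S
    // with a single 1 per kept column, so that matrix * S keeps only the
    // desired columns. For a 4-column matrix with column 1 dropped, S (4 x 3) is
    //
    //     1 0 0
    //     0 0 0
    //     0 1 0
    //     0 0 1
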
    // Column-wise L2 norms of the residuals: a column whose norm falls below
    // the tolerance is considered converged and flagged for removal
    int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector<int>& columnsToDelete)
    {
        int BlockSize = m_nev;
        Scalar sum, buffer;

        for (int iCol = 0; iCol < m_nev; iCol++)
        {
            sum = 0;
            for (int iRow = 0; iRow < m_n; iRow++)
            {
                buffer = m_residuals.coeff(iRow, iCol);
                sum += buffer * buffer;
            }

            if (std::sqrt(sum) < tolerance_L2)
            {
                BlockSize--;
                columnsToDelete.push_back(iCol);
            }
        }
        return BlockSize;
    }

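    // Equivalent dense formulation (illustrative only): the per-column norms
    // above could also be written as
    //
    //     Vector norms = Matrix(m_residuals).colwise().norm().transpose();
    //
    // the explicit coeff() loop avoids densifying the sparse residual matrix.
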
public:
    LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) :
        m_n(A.rows()),
        m_nev(X.cols()),
        m_info(NOT_COMPUTED),
        flag_with_constraints(false),
        flag_with_B(false),
        flag_with_preconditioner(false),
        A(A),
        X(X)
    {
        if (A.rows() != X.rows() || A.rows() != A.cols())
            throw std::invalid_argument("Wrong size");

        //if (m_n < 5* m_nev)
        //    throw std::invalid_argument("The problem size is small compared to the block size. Use standard eigensolver");
    }

    void setConstraints(const SparseMatrix& Y)
    {
        m_Y = Y;
        flag_with_constraints = true;
    }

    void setB(const SparseMatrix& B)
    {
        if (B.rows() != A.rows() || B.cols() != A.cols())
            throw std::invalid_argument("Wrong size");
        m_B = B;
        flag_with_B = true;
    }

    void setPreconditioner(const SparseMatrix& preconditioner)
    {
        m_preconditioner = preconditioner;
        flag_with_preconditioner = true;
    }

    void compute(int maxit = 10, Scalar tol_div_n = 1e-7)
    {
        Scalar tolerance_L2 = tol_div_n * m_n;
        int BlockSize;
        int max_iter = std::min(m_n, maxit);

        SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix;
        Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD;
        std::vector<int> columnsToDelete;

        if (flag_with_constraints)
        {
            // Apply the constraints Y to X
            applyConstraintsInPlace(X, m_Y, m_B);
        }

        // Make initial vectors orthonormal
        // implicit BX declaration
        if (orthogonalizeInPlace(X, m_B, BX) != SUCCESSFUL)
        {
            max_iter = 0;
        }

        AX = A * X;
        // First approximation via a dense Rayleigh-Ritz step: solve the small
        // m_nev x m_nev eigenvalue problem for X^T * A * X
        Eigen::EigenSolver<Matrix> eigs(Matrix(X.transpose() * AX));

        if (eigs.info() != SUCCESSFUL)
        {
            m_info = eigs.info();
            max_iter = 0;
        }
        else
        {
            m_evalues = eigs.eigenvalues().real();
            m_evectors = eigs.eigenvectors().real();
            sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE);
            sparse_eVecX = m_evectors.sparseView();

            X = X * sparse_eVecX;
            AX = AX * sparse_eVecX;
            BX = BX * sparse_eVecX;
        }

        for (int iter_num = 0; iter_num < max_iter; iter_num++)
        {
            m_residuals.resize(m_n, m_nev);
            for (int i = 0; i < m_nev; i++)
            {
                m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i);
            }
            BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete);

            if (BlockSize == 0)
            {
                m_info = SUCCESSFUL;
                break;
            }

            // Update the active mask: drop columns that have already converged
            if (columnsToDelete.size() > 0)
            {
                removeColumns(m_residuals, columnsToDelete);
                if (iter_num > 0)
                {
                    removeColumns(directions, columnsToDelete);
                    removeColumns(AD, columnsToDelete);
                    removeColumns(BD, columnsToDelete);
                }
                columnsToDelete.clear();  // for next iteration
            }

            if (flag_with_preconditioner)
            {
                // Apply the preconditioner to the residuals
                m_residuals = m_preconditioner * m_residuals;
            }

            if (flag_with_constraints)
            {
                // Apply the constraints Y to the residuals
                applyConstraintsInPlace(m_residuals, m_Y, m_B);
            }

            if (orthogonalizeInPlace(m_residuals, m_B, BR) != SUCCESSFUL)
            {
                break;
            }
            AR = A * m_residuals;

            // Orthonormalize conjugate directions
            if (iter_num > 0)
            {
                if (orthogonalizeInPlace(directions, m_B, BD, true) != SUCCESSFUL)
                {
                    break;
                }
                AD = A * directions;
            }

            // Perform the Rayleigh-Ritz procedure
            XAR = Matrix(X.transpose() * AR);
            RAR = Matrix(m_residuals.transpose() * AR);
            XBR = Matrix(X.transpose() * BR);

            if (iter_num > 0)
            {
                XAD = X.transpose() * AD;
                RAD = m_residuals.transpose() * AD;
                DAD = directions.transpose() * AD;
                XBD = X.transpose() * BD;
                RBD = m_residuals.transpose() * BD;

                gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose());
                gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize));
            }
            else
            {
                gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR);
                gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize));
            }

            // Solve the small generalized eigenvalue problem
            // gramA * v = lambda * gramB * v for the lowest m_nev eigenpairs
            DenseSymMatProd<Scalar> Aop(gramA);
            DenseCholesky<Scalar> Bop(gramB);

            SymGEigsSolver<Scalar, SMALLEST_ALGE, DenseSymMatProd<Scalar>,
                           DenseCholesky<Scalar>, GEIGS_CHOLESKY>
                geigs(&Aop, &Bop, m_nev, std::min(10, int(gramA.rows()) - 1));

            geigs.init();
            int nconv = geigs.compute();

            //Mat evecs;
            if (geigs.info() == SUCCESSFUL)
            {
                m_evalues = geigs.eigenvalues();
                m_evectors = geigs.eigenvectors();
                sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE);
            }
            else
            {
                // The generalized eigenvalue solve failed
                m_info = geigs.info();
                break;
            }

            // Compute Ritz vectors
            if (iter_num > 0)
            {
                eVecX = m_evectors.block(0, 0, m_nev, m_nev);
                eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev);
                eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev);

                sparse_eVecX = eVecX.sparseView();
                sparse_eVecR = eVecR.sparseView();
                sparse_eVecD = eVecD.sparseView();

                DD = m_residuals * sparse_eVecR;  // new conjugate directions
                ADD = AR * sparse_eVecR;
                BDD = BR * sparse_eVecR;

                DD = DD + directions * sparse_eVecD;
                ADD = ADD + AD * sparse_eVecD;
                BDD = BDD + BD * sparse_eVecD;
            }
            else
            {
                eVecX = m_evectors.block(0, 0, m_nev, m_nev);
                eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev);

                sparse_eVecX = eVecX.sparseView();
                sparse_eVecR = eVecR.sparseView();

                DD = m_residuals * sparse_eVecR;
                ADD = AR * sparse_eVecR;
                BDD = BR * sparse_eVecR;
            }

            X = X * sparse_eVecX + DD;
            AX = AX * sparse_eVecX + ADD;
            BX = BX * sparse_eVecX + BDD;

            directions = DD;
            AD = ADD;
            BD = BDD;

        }  // iteration loop

        // calculate last residuals
        m_residuals.resize(m_n, m_nev);
        for (int i = 0; i < m_nev; i++)
        {
            m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i);
        }
        BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete);

        if (BlockSize == 0)
        {
            m_info = SUCCESSFUL;
        }
    }  // compute

    Vector eigenvalues()
    {
        return m_evalues;
    }

    Matrix eigenvectors()
    {
        return m_evectors;
    }

    Matrix residuals()
    {
        return Matrix(m_residuals);
    }

    int info() { return m_info; }
};

} // namespace Spectra

#endif // LOBPCG_SOLVER
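A quick sanity check on the returned eigenpairs is to evaluate the residual of the first pair directly. Continuing the documentation example above (standard problem, no B matrix), this sketch uses only the accessors defined in this class:

    Eigen::Matrix<long double, Eigen::Dynamic, 1> lambda = solver.eigenvalues();
    Eigen::Matrix<long double, Eigen::Dynamic, Eigen::Dynamic> V = solver.eigenvectors();
    long double res0 = (A * V.col(0) - lambda(0) * V.col(0)).norm();  // expected to be small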
@ -0,0 +1,202 @@
// Copyright (C) 2018 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.

#ifndef PARTIAL_SVD_SOLVER_H
#define PARTIAL_SVD_SOLVER_H

#include <Eigen/Core>
#include "../SymEigsSolver.h"

namespace Spectra {

// Abstract class for matrix operation
template <typename Scalar>
class SVDMatOp
{
public:
    virtual int rows() const = 0;
    virtual int cols() const = 0;

    // y_out = A' * A * x_in or y_out = A * A' * x_in
    virtual void perform_op(const Scalar* x_in, Scalar* y_out) = 0;

    virtual ~SVDMatOp() {}
};

// Operation of a tall matrix in SVD
// We compute the eigenvalues of A' * A
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar, typename MatrixType>
class SVDTallMatOp : public SVDMatOp<Scalar>
{
private:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef Eigen::Map<const Vector> MapConstVec;
    typedef Eigen::Map<Vector> MapVec;
    typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;

    ConstGenericMatrix m_mat;
    const int m_dim;
    Vector m_cache;

public:
    // Constructor
    SVDTallMatOp(ConstGenericMatrix& mat) :
        m_mat(mat),
        m_dim(std::min(mat.rows(), mat.cols())),
        m_cache(mat.rows())
    {}

    // These are the rows and columns of A' * A
    int rows() const { return m_dim; }
    int cols() const { return m_dim; }

    // y_out = A' * A * x_in
    void perform_op(const Scalar* x_in, Scalar* y_out)
    {
        MapConstVec x(x_in, m_mat.cols());
        MapVec y(y_out, m_mat.cols());
        m_cache.noalias() = m_mat * x;
        y.noalias() = m_mat.transpose() * m_cache;
    }
};

// Operation of a wide matrix in SVD
// We compute the eigenvalues of A * A'
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar, typename MatrixType>
class SVDWideMatOp : public SVDMatOp<Scalar>
{
private:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef Eigen::Map<const Vector> MapConstVec;
    typedef Eigen::Map<Vector> MapVec;
    typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;

    ConstGenericMatrix m_mat;
    const int m_dim;
    Vector m_cache;

public:
    // Constructor
    SVDWideMatOp(ConstGenericMatrix& mat) :
        m_mat(mat),
        m_dim(std::min(mat.rows(), mat.cols())),
        m_cache(mat.cols())
    {}

    // These are the rows and columns of A * A'
    int rows() const { return m_dim; }
    int cols() const { return m_dim; }

    // y_out = A * A' * x_in
    void perform_op(const Scalar* x_in, Scalar* y_out)
    {
        MapConstVec x(x_in, m_mat.rows());
        MapVec y(y_out, m_mat.rows());
        m_cache.noalias() = m_mat.transpose() * x;
        y.noalias() = m_mat * m_cache;
    }
};

// Partial SVD solver
// MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...>
template <typename Scalar = double,
          typename MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> >
class PartialSVDSolver
{
private:
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    typedef const Eigen::Ref<const MatrixType> ConstGenericMatrix;

    ConstGenericMatrix m_mat;
    const int m_m;
    const int m_n;
    SVDMatOp<Scalar>* m_op;
    SymEigsSolver<Scalar, LARGEST_ALGE, SVDMatOp<Scalar> >* m_eigs;
    int m_nconv;
    Matrix m_evecs;

public:
    // Constructor
    PartialSVDSolver(ConstGenericMatrix& mat, int ncomp, int ncv) :
        m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0)
    {
        // Determine the matrix type, tall or wide
        if (m_m > m_n)
        {
            m_op = new SVDTallMatOp<Scalar, MatrixType>(mat);
        }
        else
        {
            m_op = new SVDWideMatOp<Scalar, MatrixType>(mat);
        }

        // Solver object
        m_eigs = new SymEigsSolver<Scalar, LARGEST_ALGE, SVDMatOp<Scalar> >(m_op, ncomp, ncv);
    }

    // Destructor
    virtual ~PartialSVDSolver()
    {
        delete m_eigs;
        delete m_op;
    }

    // Computation
    int compute(int maxit = 1000, Scalar tol = 1e-10)
    {
        m_eigs->init();
        m_nconv = m_eigs->compute(maxit, tol);

        return m_nconv;
    }

    // The converged singular values
    Vector singular_values() const
    {
        Vector svals = m_eigs->eigenvalues().cwiseSqrt();

        return svals;
    }

    // The converged left singular vectors
    Matrix matrix_U(int nu)
    {
        if (m_evecs.cols() < 1)
        {
            m_evecs = m_eigs->eigenvectors();
        }
        nu = std::min(nu, m_nconv);
        if (m_m <= m_n)
        {
            return m_evecs.leftCols(nu);
        }

        return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix();
    }

    // The converged right singular vectors
    Matrix matrix_V(int nv)
    {
        if (m_evecs.cols() < 1)
        {
            m_evecs = m_eigs->eigenvectors();
        }
        nv = std::min(nv, m_nconv);
        if (m_m > m_n)
        {
            return m_evecs.leftCols(nv);
        }

        return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix();
    }
};

} // namespace Spectra

#endif // PARTIAL_SVD_SOLVER_H
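For reference, a minimal usage sketch of this partial SVD solver (matrix contents and parameter values are illustrative, and the include path assumes the vendored Spectra/ header layout):

    #include <Eigen/Core>
    #include <Spectra/contrib/PartialSVDSolver.h>   // assumed include path

    Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 50);

    // Request the 5 largest singular values, using ncv = 10 Lanczos vectors.
    Spectra::PartialSVDSolver<double, Eigen::MatrixXd> svds(A, 5, 10);
    int nconv = svds.compute();                      // defaults: maxit = 1000, tol = 1e-10
    Eigen::VectorXd sigma = svds.singular_values();  // square roots of the eigenvalues of A' * A
    Eigen::MatrixXd U = svds.matrix_U(5);
    Eigen::MatrixXd V = svds.matrix_V(5);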
@ -136,6 +136,8 @@ target_include_directories(gtsam BEFORE PUBLIC
  # SuiteSparse_config
  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SuiteSparse_config>
  $<INSTALL_INTERFACE:include/gtsam/3rdparty/SuiteSparse_config>
  # Spectra
  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/Spectra>
  # CCOLAMD
  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
  $<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>
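With the Spectra build-interface include directory above, gtsam sources can use Spectra's eigensolvers directly. A minimal sketch (the header layout and the solver setup shown here are assumptions based on Spectra's usual API, not code from this commit):

    #include <Eigen/Core>
    #include <Spectra/SymEigsSolver.h>            // assumed header layout
    #include <Spectra/MatOp/DenseSymMatProd.h>

    Eigen::MatrixXd M = Eigen::MatrixXd::Random(100, 100);
    Eigen::MatrixXd S = M + M.transpose();        // symmetrize
    Spectra::DenseSymMatProd<double> op(S);
    Spectra::SymEigsSolver<double, Spectra::LARGEST_ALGE, Spectra::DenseSymMatProd<double> > eigs(&op, 3, 10);
    eigs.init();
    eigs.compute();
    Eigen::VectorXd evals = eigs.eigenvalues();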