Added PowerMinimumEigenValue method and unittest.
parent e02633c745
commit fc112a4dc7
@@ -480,31 +480,30 @@ static bool PowerMinimumEigenValue(
     Eigen::Index numLanczosVectors = 20) {
 
   // a. Compute dominant eigenpair of S using power method
-  MatrixProdFunctor lmOperator(A, 1, std::min(numLanczosVectors, A.rows()));
+  PowerFunctor lmOperator(A, 1, std::min(numLanczosVectors, A.rows()));
   lmOperator.init();
 
-  const int lmConverged = lmEigenValueSolver.compute(
-      maxIterations, 1e-4);
+  const int lmConverged = lmOperator.compute(
+      maxIterations, 1e-5);
 
   // Check convergence and bail out if necessary
   if (lmConverged != 1) return false;
 
-  const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0);
+  const double lmEigenValue = lmOperator.eigenvalues()(0);
 
   if (lmEigenValue < 0) {
     // The largest-magnitude eigenvalue is negative, and therefore also the
     // minimum eigenvalue, so just return this solution
     *minEigenValue = lmEigenValue;
     if (minEigenVector) {
-      *minEigenVector = lmEigenValueSolver.eigenvectors().col(0);
+      *minEigenVector = lmOperator.eigenvectors().col(0);
       minEigenVector->normalize(); // Ensure that this is a unit vector
     }
     return true;
   }
 
-  Matrix C = lmEigenValue * Matrix::Identity(A.rows(), A.cols()) - A;
-  MatrixProdFunctor minShiftedOperator(
-      C, 1, std::min(numLanczosVectors, A.rows()));
+  Sparse C = lmEigenValue * Matrix::Identity(A.rows(), A.cols()) - A;
+  PowerFunctor minShiftedOperator(C, 1, std::min(numLanczosVectors, C.rows()));
   minShiftedOperator.init();
 
   const int minConverged = minShiftedOperator.compute(
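
A note on the shift in the hunk above: the power method only finds the largest-magnitude eigenvalue, so the code first computes lmEigenValue of A and, if it is non-negative, runs the same solver on C = lmEigenValue*I - A, whose dominant eigenvalue is lmEigenValue - minEigenValue(A). Below is a hedged, self-contained sketch of that idea using plain Eigen and a hand-rolled power iteration (powerIteration and the 3x3 matrix are illustrative, not GTSAM code):

#include <Eigen/Dense>
#include <iostream>

// Plain power iteration: returns the largest-magnitude eigenvalue of a
// symmetric matrix via the Rayleigh quotient of the converged iterate.
static double powerIteration(const Eigen::MatrixXd &M, int iters = 500) {
  Eigen::VectorXd x = Eigen::VectorXd::Random(M.rows()).normalized();
  for (int i = 0; i < iters; ++i) x = (M * x).normalized();
  return x.dot(M * x);
}

int main() {
  Eigen::MatrixXd A(3, 3);
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;  // symmetric, all eigenvalues positive

  const double lmEigenValue = powerIteration(A);  // dominant eigenvalue of A
  const Eigen::MatrixXd C =
      lmEigenValue * Eigen::MatrixXd::Identity(3, 3) - A;  // spectral shift
  const double minEigenValue = lmEigenValue - powerIteration(C);
  std::cout << "min eigenvalue of A ~= " << minEigenValue << std::endl;
}
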
@@ -521,6 +520,36 @@ static bool PowerMinimumEigenValue(
   return true;
 }
 
+/** This is a lightweight struct used in conjunction with Spectra to compute
+ * the minimum eigenvalue and eigenvector of a sparse matrix A; it has a single
+ * nontrivial function, perform_op(x,y), that computes and returns the product
+ * y = (A + sigma*I) x */
+struct MatrixProdFunctor {
+  // Const reference to an externally-held matrix whose minimum-eigenvalue we
+  // want to compute
+  const Sparse &A_;
+
+  // Spectral shift
+  double sigma_;
+
+  // Constructor
+  explicit MatrixProdFunctor(const Sparse &A, double sigma = 0)
+      : A_(A), sigma_(sigma) {}
+
+  int rows() const { return A_.rows(); }
+  int cols() const { return A_.cols(); }
+
+  // Matrix-vector multiplication operation
+  void perform_op(const double *x, double *y) const {
+    // Wrap the raw arrays as Eigen Vector types
+    Eigen::Map<const Vector> X(x, rows());
+    Eigen::Map<Vector> Y(y, rows());
+
+    // Do the multiplication using wrapped Eigen vectors
+    Y = A_ * X + sigma_ * X;
+  }
+};
+
 /// Function to compute the minimum eigenvalue of A using Lanczos in Spectra.
 /// This does 2 things:
 ///
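
MatrixProdFunctor only has to expose rows(), cols() and perform_op() to serve as a Spectra operator. The following is a hedged sketch of that contract, assuming the Spectra 0.x template API (SymEigsSolver<Scalar, SelectionRule, OpType>); the matrix, tolerance and iteration budget are illustrative and this is not part of the change above:

#include <Eigen/Sparse>
#include <Spectra/SymEigsSolver.h>
#include <iostream>

using Sparse = Eigen::SparseMatrix<double>;
using Vector = Eigen::VectorXd;

// Same shape as MatrixProdFunctor above: computes y = (A + sigma*I) x
struct ShiftedProd {
  const Sparse &A_;
  double sigma_;
  ShiftedProd(const Sparse &A, double sigma = 0) : A_(A), sigma_(sigma) {}
  int rows() const { return A_.rows(); }
  int cols() const { return A_.cols(); }
  void perform_op(const double *x, double *y) const {
    Eigen::Map<const Vector> X(x, rows());
    Eigen::Map<Vector> Y(y, rows());
    Y = A_ * X + sigma_ * X;
  }
};

int main() {
  // Small diagonal test matrix; its largest-magnitude eigenvalue is -5
  Sparse A(4, 4);
  A.insert(0, 0) = 2;
  A.insert(1, 1) = -5;
  A.insert(2, 2) = 1;
  A.insert(3, 3) = 0.5;
  A.makeCompressed();

  ShiftedProd op(A, 0.0);
  // Request 1 eigenpair with a Krylov subspace of dimension 4
  Spectra::SymEigsSolver<double, Spectra::LARGEST_MAGN, ShiftedProd> eigs(&op, 1, 4);
  eigs.init();
  eigs.compute(1000, 1e-6);
  if (eigs.info() == Spectra::SUCCESSFUL)
    std::cout << eigs.eigenvalues()(0) << std::endl;  // ~ -5
}
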
@@ -659,6 +688,23 @@ double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
   return minEigenValue;
 }
 
+/* ************************************************************************* */
+template <size_t d>
+double ShonanAveraging<d>::computeMinEigenValueAP(const Values &values,
+                                                  Vector *minEigenVector) const {
+  assert(values.size() == nrUnknowns());
+  const Matrix S = StiefelElementMatrix(values);
+  auto A = computeA(S);
+
+  double minEigenValue;
+  bool success = PowerMinimumEigenValue(A, S, &minEigenValue, minEigenVector);
+  if (!success) {
+    throw std::runtime_error(
+        "PowerMinimumEigenValue failed to compute minimum eigenvalue.");
+  }
+  return minEigenValue;
+}
+
 /* ************************************************************************* */
 template <size_t d>
 std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
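
The new computeMinEigenValueAP is a drop-in alternative to the existing Spectra-based computeMinEigenValue. A hedged usage sketch (checkMinEigenValuePaths is a hypothetical helper and the tolerance is illustrative), assuming a ShonanAveraging3 instance and an SOn-valued solution such as the Qstar3 values used in the unit test further below:

#include <gtsam/sfm/ShonanAveraging.h>

#include <cassert>
#include <cmath>

// Both eigenvalue paths should agree at the same SOn-valued solution.
void checkMinEigenValuePaths(const gtsam::ShonanAveraging3 &shonan,
                             const gtsam::Values &values) {
  const double lambdaSpectra = shonan.computeMinEigenValue(values);
  const double lambdaAP = shonan.computeMinEigenValueAP(values);
  assert(std::fabs(lambdaSpectra - lambdaAP) < 1e-6);
}
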
@@ -26,6 +26,7 @@
 #include <gtsam/linear/VectorValues.h>
 #include <gtsam/nonlinear/LevenbergMarquardtParams.h>
 #include <gtsam/sfm/BinaryMeasurement.h>
+#include <gtsam/sfm/PowerMethod.h>
 #include <gtsam/slam/dataset.h>
 
 #include <Eigen/Sparse>
@@ -202,6 +203,13 @@ class GTSAM_EXPORT ShonanAveraging {
   double computeMinEigenValue(const Values &values,
                               Vector *minEigenVector = nullptr) const;
 
+  /**
+   * Compute minimum eigenvalue with accelerated power method.
+   * @param values: should be of type SOn
+   */
+  double computeMinEigenValueAP(const Values &values,
+                                Vector *minEigenVector = nullptr) const;
+
   /// Project pxdN Stiefel manifold matrix S to Rot3^N
   Values roundSolutionS(const Matrix &S) const;
 
@@ -166,9 +166,14 @@ TEST(ShonanAveraging3, CheckWithEigen) {
   for (int i = 1; i < lambdas.size(); i++)
     minEigenValue = min(lambdas(i), minEigenValue);
 
+  // Compute Eigenvalue with Accelerated Power method
+  double lambdaAP = kShonan.computeMinEigenValueAP(Qstar3);
+
   // Actual check
   EXPECT_DOUBLES_EQUAL(0, lambda, 1e-11);
   EXPECT_DOUBLES_EQUAL(0, minEigenValue, 1e-11);
+  EXPECT_DOUBLES_EQUAL(0, lambdaAP, 1e-11);
+
 
   // Construct test descent direction (as minEigenVector is not predictable
   // across platforms, being one from a basically flat 3d- subspace)