Linting and getAnchor wrap

release/4.3a0
Frank Dellaert 2020-08-24 14:40:29 -04:00
parent 02cd45d4b8
commit 4dd9ee5f1f
3 changed files with 108 additions and 85 deletions

View File

@@ -2925,6 +2925,7 @@ class ShonanAveragingParameters2 {
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot2& value);
pair<size_t, Rot2> getAnchor();
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
@@ -2940,6 +2941,7 @@ class ShonanAveragingParameters3 {
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot3& value);
pair<size_t, Rot3> getAnchor();
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
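
For reference, the accessor wrapped here mirrors the C++ API declared in the header further below; a minimal usage sketch, with the identity anchor and the weight value chosen purely for illustration:

#include <gtsam/geometry/Rot3.h>
#include <gtsam/sfm/ShonanAveraging.h>
#include <utility>

int main() {
  // Default-constructed parameters use LevenbergMarquardtParams::CeresDefaults().
  gtsam::ShonanAveragingParameters3 parameters;
  parameters.setAnchor(0, gtsam::Rot3());   // pin rotation 0 at the identity
  parameters.setAnchorWeight(1.0);          // alpha: weight of the anchor-based prior
  // Newly wrapped accessor: returns the (index, Rot3) pair set above.
  std::pair<size_t, gtsam::Rot3> anchor = parameters.getAnchor();
  return anchor.first == 0 ? 0 : 1;
}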

View File

@@ -16,26 +16,25 @@
* @brief Shonan Averaging algorithm
*/
#include <gtsam/sfm/ShonanAveraging.h>
#include <SymEigsSolver.h>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/sfm/ShonanAveraging.h>
#include <gtsam/sfm/ShonanFactor.h>
#include <gtsam/sfm/ShonanGaugeFactor.h>
#include <gtsam/slam/FrobeniusFactor.h>
#include <gtsam/slam/KarcherMeanFactor-inl.h>
#include <gtsam/sfm/ShonanFactor.h>
#include <Eigen/Eigenvalues>
#include <SymEigsSolver.h>
#include <algorithm>
#include <complex>
#include <iostream>
#include <map>
#include <random>
#include <set>
#include <vector>
namespace gtsam {
@@ -50,8 +49,11 @@ template <size_t d>
ShonanAveragingParameters<d>::ShonanAveragingParameters(
const LevenbergMarquardtParams &_lm, const std::string &method,
double optimalityThreshold, double alpha, double beta, double gamma)
: lm(_lm), optimalityThreshold(optimalityThreshold), alpha(alpha),
beta(beta), gamma(gamma) {
: lm(_lm),
optimalityThreshold(optimalityThreshold),
alpha(alpha),
beta(beta),
gamma(gamma) {
// By default, we will do conjugate gradient
lm.linearSolverType = LevenbergMarquardtParams::Iterative;
@@ -92,29 +94,40 @@ template struct ShonanAveragingParameters<3>;
/* ************************************************************************* */
// Calculate number of unknown rotations referenced by measurements
// Throws exception if keys are not numbered as expected (may change in future).
template <size_t d>
static size_t
NrUnknowns(const typename ShonanAveraging<d>::Measurements &measurements) {
static size_t NrUnknowns(
const typename ShonanAveraging<d>::Measurements &measurements) {
Key maxKey = 0;
std::set<Key> keys;
for (const auto &measurement : measurements) {
keys.insert(measurement.key1());
keys.insert(measurement.key2());
for (const Key &key : measurement.keys()) {
maxKey = std::max(key, maxKey);
keys.insert(key);
}
}
return keys.size();
size_t N = keys.size();
if (maxKey != N - 1) {
throw std::invalid_argument(
"As of now, ShonanAveraging expects keys numbered 0..N-1");
}
return N;
}
/* ************************************************************************* */
template <size_t d>
ShonanAveraging<d>::ShonanAveraging(const Measurements &measurements,
const Parameters &parameters)
: parameters_(parameters), measurements_(measurements),
: parameters_(parameters),
measurements_(measurements),
nrUnknowns_(NrUnknowns<d>(measurements)) {
for (const auto &measurement : measurements_) {
const auto &model = measurement.noiseModel();
if (model && model->dim() != SO<d>::dimension) {
measurement.print("Factor with incorrect noise model:\n");
throw std::invalid_argument("ShonanAveraging: measurements passed to "
"constructor have incorrect dimension.");
throw std::invalid_argument(
"ShonanAveraging: measurements passed to "
"constructor have incorrect dimension.");
}
}
Q_ = buildQ();
@@ -196,7 +209,7 @@ Matrix ShonanAveraging<d>::StiefelElementMatrix(const Values &values) {
Matrix S(p, N * d);
for (const auto it : values.filter<SOn>()) {
S.middleCols<d>(it.key * d) =
it.value.matrix().leftCols<d>(); // project Qj to Stiefel
it.value.matrix().leftCols<d>(); // project Qj to Stiefel
}
return S;
}
@@ -227,7 +240,8 @@ Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const {
}
/* ************************************************************************* */
template <size_t d> static Matrix RoundSolutionS(const Matrix &S) {
template <size_t d>
static Matrix RoundSolutionS(const Matrix &S) {
const size_t N = S.cols() / d;
// First, compute a thin SVD of S
Eigen::JacobiSVD<Matrix> svd(S, Eigen::ComputeThinV);
@@ -246,8 +260,7 @@ template <size_t d> static Matrix RoundSolutionS(const Matrix &S) {
for (size_t i = 0; i < N; ++i) {
// Compute the determinant of the ith dxd block of R
double determinant = R.middleCols<d>(d * i).determinant();
if (determinant > 0)
++numPositiveBlocks;
if (determinant > 0) ++numPositiveBlocks;
}
if (numPositiveBlocks < N / 2) {
@@ -263,7 +276,8 @@ template <size_t d> static Matrix RoundSolutionS(const Matrix &S) {
}
/* ************************************************************************* */
template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
template <>
Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
// Round to a 2*2N matrix
Matrix R = RoundSolutionS<2>(S);
@@ -276,7 +290,8 @@ template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
return values;
}
template <> Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const {
template <>
Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const {
// Round to a 3*3N matrix
Matrix R = RoundSolutionS<3>(S);
@@ -332,7 +347,8 @@ static double Kappa(const BinaryMeasurement<T> &measurement) {
}
/* ************************************************************************* */
template <size_t d> Sparse ShonanAveraging<d>::buildD() const {
template <size_t d>
Sparse ShonanAveraging<d>::buildD() const {
// Each measurement contributes 2*d elements along the diagonal of the
// degree matrix.
static constexpr size_t stride = 2 * d;
@@ -366,7 +382,8 @@ template <size_t d> Sparse ShonanAveraging<d>::buildD() const {
}
/* ************************************************************************* */
template <size_t d> Sparse ShonanAveraging<d>::buildQ() const {
template <size_t d>
Sparse ShonanAveraging<d>::buildQ() const {
// Each measurement contributes 2*d^2 elements on a pair of symmetric
// off-diagonal blocks
static constexpr size_t stride = 2 * d * d;
@@ -513,12 +530,12 @@ struct MatrixProdFunctor {
// - We've been using 10^-4 for the nonnegativity tolerance
// - for numLanczosVectors, 20 is a good default value
static bool
SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
Vector *minEigenVector = 0, size_t *numIterations = 0,
size_t maxIterations = 1000,
double minEigenvalueNonnegativityTolerance = 10e-4,
Eigen::Index numLanczosVectors = 20) {
static bool SparseMinimumEigenValue(
const Sparse &A, const Matrix &S, double *minEigenValue,
Vector *minEigenVector = 0, size_t *numIterations = 0,
size_t maxIterations = 1000,
double minEigenvalueNonnegativityTolerance = 10e-4,
Eigen::Index numLanczosVectors = 20) {
// a. Estimate the largest-magnitude eigenvalue of this matrix using Lanczos
MatrixProdFunctor lmOperator(A);
Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
@@ -530,8 +547,7 @@ SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);
// Check convergence and bail out if necessary
if (lmConverged != 1)
return false;
if (lmConverged != 1) return false;
const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0);
@@ -541,7 +557,7 @@ SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
*minEigenValue = lmEigenValue;
if (minEigenVector) {
*minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0);
minEigenVector->normalize(); // Ensure that this is a unit vector
minEigenVector->normalize(); // Ensure that this is a unit vector
}
return true;
}
@@ -578,7 +594,7 @@ SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
Vector perturbation(v0.size());
perturbation.setRandom();
perturbation.normalize();
Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3%
Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3%
// Use this to initialize the eigensolver
minEigenValueSolver.init(xinit.data());
@@ -590,21 +606,20 @@ SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue,
Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);
if (minConverged != 1)
return false;
if (minConverged != 1) return false;
*minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue;
if (minEigenVector) {
*minEigenVector = minEigenValueSolver.eigenvectors(1).col(0);
minEigenVector->normalize(); // Ensure that this is a unit vector
minEigenVector->normalize(); // Ensure that this is a unit vector
}
if (numIterations)
*numIterations = minEigenValueSolver.num_iterations();
if (numIterations) *numIterations = minEigenValueSolver.num_iterations();
return true;
}
/* ************************************************************************* */
template <size_t d> Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
auto Lambda = computeLambda(S);
return Lambda - Q_;
}
@@ -628,8 +643,8 @@ double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
/* ************************************************************************* */
template <size_t d>
std::pair<double, Vector>
ShonanAveraging<d>::computeMinEigenVector(const Values &values) const {
std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
const Values &values) const {
Vector minEigenVector;
double minEigenValue = computeMinEigenValue(values, &minEigenVector);
return std::make_pair(minEigenValue, minEigenVector);
@@ -750,7 +765,8 @@ Values ShonanAveraging<d>::initializeRandomly(std::mt19937 &rng) const {
}
/* ************************************************************************* */
template <size_t d> Values ShonanAveraging<d>::initializeRandomly() const {
template <size_t d>
Values ShonanAveraging<d>::initializeRandomly() const {
return initializeRandomly(kRandomNumberGenerator);
}
@@ -759,7 +775,7 @@ template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p,
std::mt19937 &rng) const {
const Values randomRotations = initializeRandomly(rng);
return LiftTo<Rot3>(p, randomRotations); // lift to p!
return LiftTo<Rot3>(p, randomRotations); // lift to p!
}
/* ************************************************************************* */
@@ -823,8 +839,8 @@ ShonanAveraging3::ShonanAveraging3(string g2oFile, const Parameters &parameters)
// Extract Rot3 measurement from Pose3 betweenfactors
// Modeled after similar function in dataset.cpp
static BinaryMeasurement<Rot3>
convert(const BetweenFactor<Pose3>::shared_ptr &f) {
static BinaryMeasurement<Rot3> convert(
const BetweenFactor<Pose3>::shared_ptr &f) {
auto gaussian =
boost::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
if (!gaussian)
@@ -837,12 +853,11 @@ convert(const BetweenFactor<Pose3>::shared_ptr &f) {
noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true));
}
static ShonanAveraging3::Measurements
extractRot3Measurements(const BetweenFactorPose3s &factors) {
static ShonanAveraging3::Measurements extractRot3Measurements(
const BetweenFactorPose3s &factors) {
ShonanAveraging3::Measurements result;
result.reserve(factors.size());
for (auto f : factors)
result.push_back(convert(f));
for (auto f : factors) result.push_back(convert(f));
return result;
}
@@ -851,4 +866,4 @@ ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors,
: ShonanAveraging<3>(extractRot3Measurements(factors), parameters) {}
/* ************************************************************************* */
} // namespace gtsam
} // namespace gtsam
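
The reworked NrUnknowns above now rejects key sets that are not numbered 0..N-1, and the constructor still checks that any supplied noise model has dimension SO<d>::dimension. Below is a minimal sketch of a well-formed input; the BinaryMeasurement constructor arguments and the isotropic noise model are assumptions based on the general gtsam API, not shown in this diff:

#include <gtsam/geometry/Rot3.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/sfm/ShonanAveraging.h>

int main() {
  // Keys must cover 0..N-1 with no gaps, otherwise the ShonanAveraging
  // constructor throws std::invalid_argument via NrUnknowns.
  gtsam::ShonanAveraging3::Measurements measurements;
  // 3-dimensional model, matching SO<3>::dimension as required by the constructor.
  auto model = gtsam::noiseModel::Isotropic::Sigma(3, 0.05);
  // BinaryMeasurement(key1, key2, measured, model): assumed signature.
  measurements.emplace_back(0, 1, gtsam::Rot3::Yaw(0.5), model);
  measurements.emplace_back(1, 2, gtsam::Rot3::Yaw(0.5), model);
  measurements.emplace_back(2, 0, gtsam::Rot3::Yaw(-1.0), model);  // keys {0, 1, 2}
  gtsam::ShonanAveraging3 shonan(measurements);  // would throw if, e.g., key 1 were skipped
  return 0;
}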

View File

@@ -20,36 +20,38 @@
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Vector.h>
#include <gtsam/dllexport.h>
#include <gtsam/geometry/Rot2.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>
#include <gtsam/sfm/BinaryMeasurement.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/dllexport.h>
#include <Eigen/Sparse>
#include <map>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
namespace gtsam {
class NonlinearFactorGraph;
class LevenbergMarquardtOptimizer;
/// Parameters governing optimization etc.
template <size_t d> struct GTSAM_EXPORT ShonanAveragingParameters {
template <size_t d>
struct GTSAM_EXPORT ShonanAveragingParameters {
// Select Rot2 or Rot3 interface based on the template parameter d
using Rot = typename std::conditional<d == 2, Rot2, Rot3>::type;
using Anchor = std::pair<size_t, Rot>;
// Parameters themselves:
LevenbergMarquardtParams lm; // LM parameters
double optimalityThreshold; // threshold used in checkOptimality
Anchor anchor; // pose to use as anchor if not Karcher
double alpha; // weight of anchor-based prior (default 0)
double beta; // weight of Karcher-based prior (default 1)
double gamma; // weight of gauge-fixing factors (default 0)
LevenbergMarquardtParams lm; // LM parameters
double optimalityThreshold; // threshold used in checkOptimality
Anchor anchor; // pose to use as anchor if not Karcher
double alpha; // weight of anchor-based prior (default 0)
double beta; // weight of Karcher-based prior (default 1)
double gamma; // weight of gauge-fixing factors (default 0)
ShonanAveragingParameters(const LevenbergMarquardtParams &lm =
LevenbergMarquardtParams::CeresDefaults(),
@@ -64,6 +66,7 @@ template <size_t d> struct GTSAM_EXPORT ShonanAveragingParameters {
double getOptimalityThreshold() const { return optimalityThreshold; }
void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; }
std::pair<size_t, Rot> getAnchor() { return anchor; }
void setAnchorWeight(double value) { alpha = value; }
double getAnchorWeight() { return alpha; }
@@ -93,8 +96,9 @@ using ShonanAveragingParameters3 = ShonanAveragingParameters<3>;
* European Computer Vision Conference, 2020.
* You can view our ECCV spotlight video at https://youtu.be/5ppaqMyHtE0
*/
template <size_t d> class GTSAM_EXPORT ShonanAveraging {
public:
template <size_t d>
class GTSAM_EXPORT ShonanAveraging {
public:
using Sparse = Eigen::SparseMatrix<double>;
// Define the Parameters type and use its typedef of the rotation type:
@@ -105,13 +109,13 @@ public:
// TODO(frank): use BinaryMeasurement?
using Measurements = std::vector<BinaryMeasurement<Rot>>;
private:
private:
Parameters parameters_;
Measurements measurements_;
size_t nrUnknowns_;
Sparse D_; // Sparse (diagonal) degree matrix
Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr
Sparse L_; // connection Laplacian L = D - Q, needed for optimality check
Sparse D_; // Sparse (diagonal) degree matrix
Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr
Sparse L_; // connection Laplacian L = D - Q, needed for optimality check
/**
* Build 3Nx3N sparse matrix consisting of rotation measurements, arranged as
@@ -122,7 +126,7 @@ private:
/// Build 3Nx3N sparse degree matrix D
Sparse buildD() const;
public:
public:
/// @name Standard Constructors
/// @{
@@ -156,12 +160,12 @@ public:
/// @name Matrix API (advanced use, debugging)
/// @{
Sparse D() const { return D_; } ///< Sparse version of D
Matrix denseD() const { return Matrix(D_); } ///< Dense version of D
Sparse Q() const { return Q_; } ///< Sparse version of Q
Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q
Sparse L() const { return L_; } ///< Sparse version of L
Matrix denseL() const { return Matrix(L_); } ///< Dense version of L
Sparse D() const { return D_; } ///< Sparse version of D
Matrix denseD() const { return Matrix(D_); } ///< Dense version of D
Sparse Q() const { return Q_; } ///< Sparse version of Q
Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q
Sparse L() const { return L_; } ///< Sparse version of L
Matrix denseL() const { return Matrix(L_); } ///< Dense version of L
/// Version that takes pxdN Stiefel manifold elements
Sparse computeLambda(const Matrix &S) const;
@@ -220,11 +224,10 @@ public:
* @param minEigenVector corresponding to minEigenValue at level p-1
* @return values of type SO(p)
*/
Values
initializeWithDescent(size_t p, const Values &values,
const Vector &minEigenVector, double minEigenValue,
double gradienTolerance = 1e-2,
double preconditionedGradNormTolerance = 1e-4) const;
Values initializeWithDescent(
size_t p, const Values &values, const Vector &minEigenVector,
double minEigenValue, double gradienTolerance = 1e-2,
double preconditionedGradNormTolerance = 1e-4) const;
/// @}
/// @name Advanced API
/// @{
@@ -300,7 +303,8 @@ public:
Values roundSolution(const Values &values) const;
/// Lift Values of type T to SO(p)
template <class T> static Values LiftTo(size_t p, const Values &values) {
template <class T>
static Values LiftTo(size_t p, const Values &values) {
Values result;
for (const auto it : values.filter<T>()) {
result.insert(it.key, SOn::Lift(p, it.value.matrix()));
@@ -346,20 +350,22 @@ public:
// convenience interface with file access.
class ShonanAveraging2 : public ShonanAveraging<2> {
public:
public:
ShonanAveraging2(const Measurements &measurements,
const Parameters &parameters = Parameters());
ShonanAveraging2(string g2oFile, const Parameters &parameters = Parameters());
explicit ShonanAveraging2(string g2oFile,
const Parameters &parameters = Parameters());
};
class ShonanAveraging3 : public ShonanAveraging<3> {
public:
public:
ShonanAveraging3(const Measurements &measurements,
const Parameters &parameters = Parameters());
ShonanAveraging3(string g2oFile, const Parameters &parameters = Parameters());
explicit ShonanAveraging3(string g2oFile,
const Parameters &parameters = Parameters());
// TODO(frank): Deprecate after we land pybind wrapper
ShonanAveraging3(const BetweenFactorPose3s &factors,
const Parameters &parameters = Parameters());
};
} // namespace gtsam
} // namespace gtsam
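
Taken together, the declarations above support one step of the Riemannian staircase. The sketch below strings them together; tryOptimizingAt, the p = 4 and p = 5 levels, the g2o filename, and the -1e-4 threshold are assumptions for illustration and are not part of this diff:

#include <gtsam/sfm/ShonanAveraging.h>
#include <tuple>

int main() {
  gtsam::ShonanAveraging3 shonan("pose3_graph.g2o");  // hypothetical input file
  // Lift a random Rot3 initialization to SO(4) and solve at p = 4.
  gtsam::Values initial =
      gtsam::ShonanAveraging3::LiftTo<gtsam::Rot3>(4, shonan.initializeRandomly());
  gtsam::Values Qstar = shonan.tryOptimizingAt(4, initial);  // assumed member, not in this diff
  // Certificate: minimum eigenvalue of A = Lambda - Q must be (near) nonnegative.
  double minEigenValue;
  gtsam::Vector minEigenVector;
  std::tie(minEigenValue, minEigenVector) = shonan.computeMinEigenVector(Qstar);
  if (minEigenValue < -1e-4) {  // illustrative threshold, cf. optimalityThreshold
    // Not certified at p = 4: descend along the eigenvector to seed level p = 5.
    gtsam::Values next =
        shonan.initializeWithDescent(5, Qstar, minEigenVector, minEigenValue);
    Qstar = shonan.tryOptimizingAt(5, next);
  }
  gtsam::Values rotations = shonan.roundSolution(Qstar);  // project back to Rot3 values
  return 0;
}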