Merge branch 'develop' into fix/warning_in_smart_factor

commit 06bd104c19

INSTALL.md | 12
@@ -41,11 +41,6 @@ $ make install
- MacOS 10.6 - 10.14
- Windows 7, 8, 8.1, 10

Known issues:

- MSVC 2013 is not yet supported because it cannot build the serialization module
  of Boost 1.55 (or earlier).

2. GTSAM makes extensive use of debug assertions, and we highly recommend you work
   in Debug mode while developing (enabled by default). Likewise, it is imperative
   that you switch to release mode when running finished code and for timing. GTSAM
@@ -70,6 +65,13 @@ execute commands as follows for an out-of-source build:
This will build the library and unit tests, run all of the unit tests,
and then install the library itself.

## Known Issues

- When using `GTSAM_BUILD_WITH_MARCH_NATIVE=ON`, you may encounter issues in running tests which we are still investigating:
  - Use of a version of GCC < 7.5 results in an "Indeterminant Linear System" error for `testSmartProjectionFactor`.
  - Use of Boost version < 1.67 with clang will give a segfault for multiple test cases.
- MSVC 2013 is not yet supported because it cannot build the serialization module of Boost 1.55 (or earlier).

# Windows Installation

This section details how to build a GTSAM `.sln` file using Visual Studio.
File diff suppressed because it is too large
@ -153,7 +153,7 @@ const Eigen::IOFormat& matlabFormat() {
|
|||
/* ************************************************************************* */
|
||||
// 3-argument call
|
||||
void print(const Matrix& A, const string &s, ostream& stream) {
|
||||
cout << s << A.format(matlabFormat()) << endl;
|
||||
stream << s << A.format(matlabFormat()) << endl;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -33,9 +33,9 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <boost/shared_ptr.hpp>
|
||||
#include <boost/concept_check.hpp>
|
||||
#include <stdio.h>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#define GTSAM_PRINT(x)((x).print(#x))
|
||||
|
@ -72,10 +72,10 @@ namespace gtsam {
|
|||
}; // \ Testable
|
||||
|
||||
inline void print(float v, const std::string& s = "") {
|
||||
printf("%s%f\n",s.c_str(),v);
|
||||
std::cout << (s.empty() ? s : s + " ") << v << std::endl;
|
||||
}
|
||||
inline void print(double v, const std::string& s = "") {
|
||||
printf("%s%lf\n",s.c_str(),v);
|
||||
std::cout << (s.empty() ? s : s + " ") << v << std::endl;
|
||||
}
|
||||
|
||||
/** Call equal on the object */
|
||||
|
|
|
@ -372,16 +372,19 @@ bool assert_stdout_equal(const std::string& expected, const V& actual) {
|
|||
|
||||
/**
|
||||
* Capture print function output and compare against string.
|
||||
*
|
||||
* @param s: Optional string to pass to the print() method.
|
||||
*/
|
||||
template<class V>
|
||||
bool assert_print_equal(const std::string& expected, const V& actual) {
|
||||
template <class V>
|
||||
bool assert_print_equal(const std::string& expected, const V& actual,
|
||||
const std::string& s = "") {
|
||||
// Redirect output to buffer so we can compare
|
||||
std::stringstream buffer;
|
||||
// Save the original output stream so we can reset later
|
||||
std::streambuf* old = std::cout.rdbuf(buffer.rdbuf());
|
||||
|
||||
// We test against actual std::cout for faithful reproduction
|
||||
actual.print();
|
||||
actual.print(s);
|
||||
|
||||
// Get output string and reset stdout
|
||||
std::string actual_ = buffer.str();
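For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of the capture-and-restore pattern this helper relies on. The `Foo` type is a hypothetical stand-in for any GTSAM object with a `print()` method; the restore step via the saved `old` buffer happens just past the context shown in this hunk.

```cpp
#include <iostream>
#include <sstream>
#include <string>

// Hypothetical printable type standing in for a GTSAM object with print().
struct Foo {
  void print(const std::string& s = "") const {
    std::cout << (s.empty() ? s : s + " ") << "Foo" << std::endl;
  }
};

int main() {
  std::stringstream buffer;
  // Redirect std::cout into the buffer, remembering the original stream buffer.
  std::streambuf* old = std::cout.rdbuf(buffer.rdbuf());
  Foo().print("label");
  // Restore std::cout before writing to it again.
  std::cout.rdbuf(old);
  std::cout << "captured: " << buffer.str();  // prints "captured: label Foo"
  return 0;
}
```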
|
||||
|
|
|
@ -30,10 +30,10 @@ double f(const Vector2& x) {
|
|||
/* ************************************************************************* */
|
||||
//
|
||||
TEST(testNumericalDerivative, numericalGradient) {
|
||||
Vector2 x(1, 1);
|
||||
Vector2 x(1, 1.1);
|
||||
|
||||
Vector expected(2);
|
||||
expected << cos(x(1)), -sin(x(0));
|
||||
expected << cos(x(0)), -sin(x(1));
|
||||
|
||||
Vector actual = numericalGradient<Vector2>(f, x);
|
||||
|
||||
|
@ -42,7 +42,7 @@ TEST(testNumericalDerivative, numericalGradient) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
TEST(testNumericalDerivative, numericalHessian) {
|
||||
Vector2 x(1, 1);
|
||||
Vector2 x(1, 1.1);
|
||||
|
||||
Matrix expected(2, 2);
|
||||
expected << -sin(x(0)), 0.0, 0.0, -cos(x(1));
|
||||
|
|
|
@ -48,7 +48,13 @@ Matrix3 Pose2::matrix() const {
|
|||
|
||||
/* ************************************************************************* */
|
||||
void Pose2::print(const string& s) const {
|
||||
cout << s << "(" << t_.x() << ", " << t_.y() << ", " << r_.theta() << ")" << endl;
|
||||
std::cout << (s.empty() ? s : s + " ") << *this << std::endl;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::ostream &operator<<(std::ostream &os, const Pose2& pose) {
|
||||
os << "(" << pose.x() << ", " << pose.y() << ", " << pose.theta() << ")";
|
||||
return os;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -287,6 +287,10 @@ public:
|
|||
*/
|
||||
static std::pair<size_t, size_t> rotationInterval() { return std::make_pair(2, 2); }
|
||||
|
||||
/// Output stream operator
|
||||
GTSAM_EXPORT
|
||||
friend std::ostream &operator<<(std::ostream &os, const Pose2& p);
|
||||
|
||||
/// @}
|
||||
|
||||
private:
|
||||
|
|
|
@ -14,16 +14,17 @@
|
|||
* @brief Unit tests for Pose2 class
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Rot2.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/testLie.h>
|
||||
#include <gtsam/base/lieProxies.h>
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <boost/optional.hpp>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
#include <gtsam/base/lieProxies.h>
|
||||
#include <gtsam/base/testLie.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/geometry/Rot2.h>
|
||||
|
||||
#include <boost/assign/std/vector.hpp> // for operator +=
|
||||
#include <boost/optional.hpp>
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
|
||||
|
@ -910,6 +911,22 @@ TEST(Pose2 , TransformCovariance3) {
|
|||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Pose2, Print) {
|
||||
Pose2 pose(Rot2::identity(), Point2(1, 2));
|
||||
|
||||
// Generate the expected output
|
||||
string s = "Planar Pose";
|
||||
string expected_stdout = "(1, 2, 0)";
|
||||
string expected1 = expected_stdout + "\n";
|
||||
string expected2 = s + " " + expected1;
|
||||
|
||||
EXPECT(assert_stdout_equal(expected_stdout, pose));
|
||||
|
||||
EXPECT(assert_print_equal(expected1, pose));
|
||||
EXPECT(assert_print_equal(expected2, pose, s));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
@ -2623,6 +2623,8 @@ virtual class BearingRangeFactor : gtsam::NoiseModelFactor {
|
|||
const BEARING& measuredBearing, const RANGE& measuredRange,
|
||||
const gtsam::noiseModel::Base* noiseModel);
|
||||
|
||||
BearingRange<POSE, POINT, BEARING, RANGE> measured() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
|
|
@ -0,0 +1,176 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file AcceleratedPowerMethod.h
|
||||
* @date Sept 2020
|
||||
* @author Jing Wu
|
||||
* @brief accelerated power method for fast eigenvalue and eigenvector
|
||||
* computation
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/linear/PowerMethod.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
using Sparse = Eigen::SparseMatrix<double>;
|
||||
|
||||
/**
|
||||
* \brief Compute maximum Eigenpair with accelerated power method
|
||||
*
|
||||
* References :
|
||||
* 1) G. Golub and C. V. Loan, Matrix Computations, 3rd ed. Baltimore, Johns
|
||||
* Hopkins University Press, 1996, pp.405-411
|
||||
* 2) Rosen, D. and Carlone, L., 2017, September. Computational
|
||||
* enhancements for certifiably correct SLAM. In Proceedings of the
|
||||
* International Conference on Intelligent Robots and Systems.
|
||||
* 3) Yulun Tian and Kasra Khosoussi and David M. Rosen and Jonathan P. How,
|
||||
* 2020, Aug, Distributed Certifiably Correct Pose-Graph Optimization, Arxiv
|
||||
* 4) C. de Sa, B. He, I. Mitliagkas, C. Ré, and P. Xu, “Accelerated
|
||||
* stochastic power iteration,” in Proc. Mach. Learn. Res., no. 84, 2018, pp.
|
||||
* 58–67
|
||||
*
|
||||
* It performs the following iteration: \f$ x_{k+1} = A * x_k - \beta *
|
||||
* x_{k-1} \f$ where A is the aim matrix we want to get eigenpair of, x is the
|
||||
* Ritz vector
|
||||
*
|
||||
* Template argument Operator just needs multiplication operator
|
||||
*
|
||||
*/
|
||||
template <class Operator>
|
||||
class AcceleratedPowerMethod : public PowerMethod<Operator> {
|
||||
|
||||
double beta_ = 0; // a Polyak momentum term
|
||||
|
||||
Vector previousVector_; // store previous vector
|
||||
|
||||
public:
|
||||
/**
|
||||
* Constructor from aim matrix A (given as Matrix or Sparse), optional initial
|
||||
* vector as ritzVector
|
||||
*/
|
||||
explicit AcceleratedPowerMethod(
|
||||
const Operator &A, const boost::optional<Vector> initial = boost::none,
|
||||
double initialBeta = 0.0)
|
||||
: PowerMethod<Operator>(A, initial) {
|
||||
// initialize Ritz eigen vector and previous vector
|
||||
this->ritzVector_ = initial ? initial.get() : Vector::Random(this->dim_);
|
||||
this->ritzVector_.normalize();
|
||||
previousVector_ = Vector::Zero(this->dim_);
|
||||
|
||||
// initialize beta_
|
||||
beta_ = initialBeta;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run accelerated power iteration to get ritzVector with beta and previous
|
||||
* two ritzVector x0 and x00, and return y = (A * x0 - \beta * x00) / || A * x0
|
||||
* - \beta * x00 ||
|
||||
*/
|
||||
Vector acceleratedPowerIteration (const Vector &x1, const Vector &x0,
|
||||
const double beta) const {
|
||||
Vector y = this->A_ * x1 - beta * x0;
|
||||
y.normalize();
|
||||
return y;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run accelerated power iteration to get ritzVector with beta and previous
|
||||
* two ritzVector x0 and x00, and return y = (A * x0 - \beta * x00) / || A * x0
|
||||
* - \beta * x00 ||
|
||||
*/
|
||||
Vector acceleratedPowerIteration () const {
|
||||
Vector y = acceleratedPowerIteration(this->ritzVector_, previousVector_, beta_);
|
||||
return y;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tuning the momentum beta using the Best Heavy Ball algorithm in Ref(3), T
|
||||
* is the iteration time to find beta with largest Rayleigh quotient
|
||||
*/
|
||||
double estimateBeta(const size_t T = 10) const {
|
||||
// set initial estimation of maxBeta
|
||||
Vector initVector = this->ritzVector_;
|
||||
const double up = initVector.dot( this->A_ * initVector );
|
||||
const double down = initVector.dot(initVector);
|
||||
const double mu = up / down;
|
||||
double maxBeta = mu * mu / 4;
|
||||
size_t maxIndex;
|
||||
std::vector<double> betas;
|
||||
|
||||
Matrix R = Matrix::Zero(this->dim_, 10);
|
||||
// run T times of iteration to find the beta that has the largest Rayleigh quotient
|
||||
for (size_t t = 0; t < T; t++) {
|
||||
// after each t iteration, reset the betas with the current maxBeta
|
||||
betas = {2 / 3 * maxBeta, 0.99 * maxBeta, maxBeta, 1.01 * maxBeta,
|
||||
1.5 * maxBeta};
|
||||
// iterate through every beta value
|
||||
for (size_t k = 0; k < betas.size(); ++k) {
|
||||
// initialize x0 and x00 in each iteration of each beta
|
||||
Vector x0 = initVector;
|
||||
Vector x00 = Vector::Zero(this->dim_);
|
||||
// run 10 steps of accelerated power iteration with this beta
|
||||
for (size_t j = 1; j < 10; j++) {
|
||||
if (j < 2) {
|
||||
R.col(0) = acceleratedPowerIteration(x0, x00, betas[k]);
|
||||
R.col(1) = acceleratedPowerIteration(R.col(0), x0, betas[k]);
|
||||
} else {
|
||||
R.col(j) = acceleratedPowerIteration(R.col(j - 1), R.col(j - 2),
|
||||
betas[k]);
|
||||
}
|
||||
}
|
||||
// compute the Rayleigh quotient for the randomly sampled vector after
|
||||
// 10 steps of power accelerated iteration
|
||||
const Vector x = R.col(9);
|
||||
const double up = x.dot(this->A_ * x);
|
||||
const double down = x.dot(x);
|
||||
const double mu = up / down;
|
||||
// store the momentum with the largest Rayleigh quotient and the corresponding beta index
|
||||
if (mu * mu / 4 > maxBeta) {
|
||||
// save the max beta index
|
||||
maxIndex = k;
|
||||
maxBeta = mu * mu / 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
// set beta_ to momentum with largest Rayleigh quotient
|
||||
return betas[maxIndex];
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the accelerated iteration. After each accelerated iteration,
* calculate the Ritz error and repeat until it converges.
* Return true if converged, otherwise false.
|
||||
*/
|
||||
bool compute(size_t maxIterations, double tol) {
|
||||
// Starting
|
||||
bool isConverged = false;
|
||||
|
||||
for (size_t i = 0; i < maxIterations && !isConverged; i++) {
|
||||
++(this->nrIterations_);
|
||||
Vector tmp = this->ritzVector_;
|
||||
// update the ritzVector after accelerated power iteration
|
||||
this->ritzVector_ = acceleratedPowerIteration();
|
||||
// update the previousVector with ritzVector
|
||||
previousVector_ = tmp;
|
||||
// update the ritzValue
|
||||
this->ritzValue_ = this->ritzVector_.dot(this->A_ * this->ritzVector_);
|
||||
isConverged = this->converged(tol);
|
||||
}
|
||||
|
||||
return isConverged;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace gtsam
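A minimal usage sketch of the class above (not part of the GTSAM sources; the 3x3 diagonal test matrix, the iteration cap, and the tolerance are arbitrary choices for illustration):

```cpp
#include <gtsam/linear/AcceleratedPowerMethod.h>

#include <iostream>

int main() {
  // Diagonal test operator with eigenvalues 3, 2, 1; the dominant eigenvalue is 3.
  gtsam::Sparse A(3, 3);
  A.coeffRef(0, 0) = 3;
  A.coeffRef(1, 1) = 2;
  A.coeffRef(2, 2) = 1;

  // Default-constructs a random Ritz vector and starts with beta = 0.
  gtsam::AcceleratedPowerMethod<gtsam::Sparse> apm(A);

  // Run up to 100 accelerated iterations with a 1e-5 Ritz-residual tolerance.
  const bool converged = apm.compute(100, 1e-5);

  std::cout << "converged: " << converged
            << ", eigenvalue ~= " << apm.eigenvalue() << "\n"
            << "eigenvector:\n" << apm.eigenvector() << std::endl;
  return 0;
}
```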
|
|
@ -100,33 +100,35 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
vector<boost::tuple<size_t, size_t, double> > GaussianFactorGraph::sparseJacobian() const {
|
||||
using SparseTriplets = std::vector<std::tuple<int, int, double> >;
|
||||
SparseTriplets GaussianFactorGraph::sparseJacobian(const Ordering& ordering,
|
||||
size_t& nrows,
|
||||
size_t& ncols) const {
|
||||
gttic_(GaussianFactorGraph_sparseJacobian);
|
||||
// First find dimensions of each variable
|
||||
typedef std::map<Key, size_t> KeySizeMap;
|
||||
KeySizeMap dims;
|
||||
for (const sharedFactor& factor : *this) {
|
||||
if (!static_cast<bool>(factor))
|
||||
continue;
|
||||
for (const auto& factor : *this) {
|
||||
if (!static_cast<bool>(factor)) continue;
|
||||
|
||||
for (GaussianFactor::const_iterator key = factor->begin();
|
||||
key != factor->end(); ++key) {
|
||||
dims[*key] = factor->getDim(key);
|
||||
for (auto it = factor->begin(); it != factor->end(); ++it) {
|
||||
dims[*it] = factor->getDim(it);
|
||||
}
|
||||
}
|
||||
|
||||
// Compute first scalar column of each variable
|
||||
size_t currentColIndex = 0;
|
||||
ncols = 0;
|
||||
KeySizeMap columnIndices = dims;
|
||||
for (const KeySizeMap::value_type& col : dims) {
|
||||
columnIndices[col.first] = currentColIndex;
|
||||
currentColIndex += dims[col.first];
|
||||
for (const auto key : ordering) {
|
||||
columnIndices[key] = ncols;
|
||||
ncols += dims[key];
|
||||
}
|
||||
|
||||
// Iterate over all factors, adding sparse scalar entries
|
||||
typedef boost::tuple<size_t, size_t, double> triplet;
|
||||
vector<triplet> entries;
|
||||
size_t row = 0;
|
||||
for (const sharedFactor& factor : *this) {
|
||||
SparseTriplets entries;
|
||||
entries.reserve(30 * size());
|
||||
nrows = 0;
|
||||
for (const auto& factor : *this) {
|
||||
if (!static_cast<bool>(factor)) continue;
|
||||
|
||||
// Convert to JacobianFactor if necessary
|
||||
|
@ -138,52 +140,60 @@ namespace gtsam {
|
|||
if (hessian)
|
||||
jacobianFactor.reset(new JacobianFactor(*hessian));
|
||||
else
|
||||
throw invalid_argument(
|
||||
"GaussianFactorGraph contains a factor that is neither a JacobianFactor nor a HessianFactor.");
|
||||
throw std::invalid_argument(
|
||||
"GaussianFactorGraph contains a factor that is neither a "
|
||||
"JacobianFactor nor a HessianFactor.");
|
||||
}
|
||||
|
||||
// Whiten the factor and add entries for it
|
||||
// iterate over all variables in the factor
|
||||
const JacobianFactor whitened(jacobianFactor->whiten());
|
||||
for (JacobianFactor::const_iterator key = whitened.begin();
|
||||
key < whitened.end(); ++key) {
|
||||
for (auto key = whitened.begin(); key < whitened.end(); ++key) {
|
||||
JacobianFactor::constABlock whitenedA = whitened.getA(key);
|
||||
// find first column index for this key
|
||||
size_t column_start = columnIndices[*key];
|
||||
for (size_t i = 0; i < (size_t) whitenedA.rows(); i++)
|
||||
for (size_t j = 0; j < (size_t) whitenedA.cols(); j++) {
|
||||
for (size_t i = 0; i < (size_t)whitenedA.rows(); i++)
|
||||
for (size_t j = 0; j < (size_t)whitenedA.cols(); j++) {
|
||||
double s = whitenedA(i, j);
|
||||
if (std::abs(s) > 1e-12)
|
||||
entries.push_back(boost::make_tuple(row + i, column_start + j, s));
|
||||
entries.emplace_back(nrows + i, column_start + j, s);
|
||||
}
|
||||
}
|
||||
|
||||
JacobianFactor::constBVector whitenedb(whitened.getb());
|
||||
size_t bcolumn = currentColIndex;
|
||||
for (size_t i = 0; i < (size_t) whitenedb.size(); i++)
|
||||
entries.push_back(boost::make_tuple(row + i, bcolumn, whitenedb(i)));
|
||||
for (size_t i = 0; i < (size_t)whitenedb.size(); i++) {
|
||||
double s = whitenedb(i);
|
||||
if (std::abs(s) > 1e-12) entries.emplace_back(nrows + i, ncols, s);
|
||||
}
|
||||
|
||||
// Increment row index
|
||||
row += jacobianFactor->rows();
|
||||
nrows += jacobianFactor->rows();
|
||||
}
|
||||
return vector<triplet>(entries.begin(), entries.end());
|
||||
|
||||
ncols++; // +1 for b-column
|
||||
return entries;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
SparseTriplets GaussianFactorGraph::sparseJacobian() const {
|
||||
size_t nrows, ncols;
|
||||
return sparseJacobian(Ordering(this->keys()), nrows, ncols);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix GaussianFactorGraph::sparseJacobian_() const {
|
||||
|
||||
gttic_(GaussianFactorGraph_sparseJacobian_matrix);
|
||||
// call sparseJacobian
|
||||
typedef boost::tuple<size_t, size_t, double> triplet;
|
||||
vector<triplet> result = sparseJacobian();
|
||||
auto result = sparseJacobian();
|
||||
|
||||
// translate to base 1 matrix
|
||||
size_t nzmax = result.size();
|
||||
Matrix IJS(3,nzmax);
|
||||
Matrix IJS(3, nzmax);
|
||||
for (size_t k = 0; k < result.size(); k++) {
|
||||
const triplet& entry = result[k];
|
||||
IJS(0,k) = double(entry.get<0>() + 1);
|
||||
IJS(1,k) = double(entry.get<1>() + 1);
|
||||
IJS(2,k) = entry.get<2>();
|
||||
const auto& entry = result[k];
|
||||
IJS(0, k) = double(std::get<0>(entry) + 1);
|
||||
IJS(1, k) = double(std::get<1>(entry) + 1);
|
||||
IJS(2, k) = std::get<2>(entry);
|
||||
}
|
||||
return IJS;
|
||||
}
|
||||
|
|
|
@ -181,15 +181,25 @@ namespace gtsam {
|
|||
///@{
|
||||
|
||||
/**
|
||||
* Return vector of i, j, and s to generate an m-by-n sparse Jacobian matrix,
|
||||
* where i(k) and j(k) are the base 0 row and column indices, s(k) a double.
|
||||
* Returns a sparse augmented Jacobian matrix as a vector of i, j, and s,
|
||||
* where i(k) and j(k) are the base 0 row and column indices, and s(k) is
|
||||
* the entry as a double.
|
||||
* The standard deviations are baked into A and b
|
||||
* @return the sparse matrix as a std::vector of std::tuples
|
||||
* @param ordering the column ordering
|
||||
* @param[out] nrows The number of rows in the augmented Jacobian
|
||||
* @param[out] ncols The number of columns in the augmented Jacobian
|
||||
*/
|
||||
std::vector<boost::tuple<size_t, size_t, double> > sparseJacobian() const;
|
||||
std::vector<std::tuple<int, int, double> > sparseJacobian(
|
||||
const Ordering& ordering, size_t& nrows, size_t& ncols) const;
|
||||
|
||||
/** Returns a sparse augmented Jacobian matrix with default ordering */
|
||||
std::vector<std::tuple<int, int, double> > sparseJacobian() const;
|
||||
|
||||
/**
|
||||
* Matrix version of sparseJacobian: generates a 3*m matrix with [i,j,s] entries
|
||||
* such that S(i(k),j(k)) = s(k), which can be given to MATLAB's sparse.
|
||||
* Matrix version of sparseJacobian: generates a 3*m matrix with [i,j,s]
|
||||
* entries such that S(i(k),j(k)) = s(k), which can be given to MATLAB's
|
||||
* sparse. Note: i, j are 1-indexed.
|
||||
* The standard deviations are baked into A and b
|
||||
*/
|
||||
Matrix sparseJacobian_() const;
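A brief sketch of how the new triplet overload might be called (illustrative only; the single unary factor and the `Ordering(gfg.keys())` default mirror patterns used elsewhere in this changeset):

```cpp
#include <gtsam/linear/GaussianFactorGraph.h>

#include <iostream>
#include <tuple>

int main() {
  using namespace gtsam;

  // One unary factor on key 0: 2x2 identity A, zero b, unit noise.
  GaussianFactorGraph gfg;
  gfg.add(0, I_2x2, Vector2::Zero(), noiseModel::Unit::Create(2));

  // The overload with an explicit ordering also reports the matrix size.
  size_t nrows = 0, ncols = 0;
  const auto entries = gfg.sparseJacobian(Ordering(gfg.keys()), nrows, ncols);

  // Each entry is a 0-indexed (row, col, value) triplet of the whitened,
  // augmented Jacobian [A|b]; the b vector occupies the last column, ncols - 1.
  for (const auto& e : entries)
    std::cout << std::get<0>(e) << " " << std::get<1>(e) << " "
              << std::get<2>(e) << "\n";
  std::cout << nrows << " x " << ncols << std::endl;
  return 0;
}
```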
|
||||
|
|
|
@ -0,0 +1,152 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file PowerMethod.h
|
||||
* @date Sept 2020
|
||||
* @author Jing Wu
|
||||
* @brief Power method for fast eigenvalue and eigenvector
|
||||
* computation
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Sparse>
|
||||
#include <random>
|
||||
#include <vector>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
using Sparse = Eigen::SparseMatrix<double>;
|
||||
|
||||
/**
|
||||
* \brief Compute maximum Eigenpair with power method
|
||||
*
|
||||
* References :
|
||||
* 1) G. Golub and C. V. Loan, Matrix Computations, 3rd ed. Baltimore, Johns
|
||||
* Hopkins University Press, 1996, pp.405-411
|
||||
* 2) Rosen, D. and Carlone, L., 2017, September. Computational
|
||||
* enhancements for certifiably correct SLAM. In Proceedings of the
|
||||
* International Conference on Intelligent Robots and Systems.
|
||||
* 3) Yulun Tian and Kasra Khosoussi and David M. Rosen and Jonathan P. How,
|
||||
* 2020, Aug, Distributed Certifiably Correct Pose-Graph Optimization, Arxiv
|
||||
* 4) C. de Sa, B. He, I. Mitliagkas, C. Ré, and P. Xu, “Accelerated
|
||||
* stochastic power iteration,” in Proc. Mach. Learn. Res., no. 84, 2018, pp.
|
||||
* 58–67
|
||||
*
|
||||
* It performs the following iteration: \f$ x_{k+1} = A * x_k \f$
|
||||
* where A is the aim matrix we want to get eigenpair of, x is the
|
||||
* Ritz vector
|
||||
*
|
||||
* Template argument Operator just needs multiplication operator
|
||||
*
|
||||
*/
|
||||
template <class Operator>
|
||||
class PowerMethod {
|
||||
protected:
|
||||
/**
|
||||
* Const reference to an externally-held matrix whose dominant eigenpair we
* want to compute
|
||||
*/
|
||||
const Operator &A_;
|
||||
|
||||
const int dim_; // dimension of Matrix A
|
||||
|
||||
size_t nrIterations_; // number of iterations
|
||||
|
||||
double ritzValue_; // Ritz eigenvalue
|
||||
Vector ritzVector_; // Ritz eigenvector
|
||||
|
||||
public:
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/// Construct from the aim matrix and initial ritz vector
|
||||
explicit PowerMethod(const Operator &A,
|
||||
const boost::optional<Vector> initial = boost::none)
|
||||
: A_(A), dim_(A.rows()), nrIterations_(0) {
|
||||
Vector x0;
|
||||
x0 = initial ? initial.get() : Vector::Random(dim_);
|
||||
x0.normalize();
|
||||
|
||||
// initialize Ritz eigen value
|
||||
ritzValue_ = 0.0;
|
||||
|
||||
// initialize Ritz eigen vector
|
||||
ritzVector_ = powerIteration(x0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Run power iteration to get ritzVector with previous ritzVector x, and
|
||||
* return A * x / || A * x ||
|
||||
*/
|
||||
Vector powerIteration(const Vector &x) const {
|
||||
Vector y = A_ * x;
|
||||
y.normalize();
|
||||
return y;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run power iteration to get ritzVector with previous ritzVector x, and
|
||||
* return A * x / || A * x ||
|
||||
*/
|
||||
Vector powerIteration() const { return powerIteration(ritzVector_); }
|
||||
|
||||
/**
|
||||
* After performing a power iteration on a single Ritz value, check whether
* the Ritz residual for the current Ritz pair is less than the required
* convergence tol: return true if yes, else false
|
||||
*/
|
||||
bool converged(double tol) const {
|
||||
const Vector x = ritzVector_;
|
||||
// store the Ritz eigen value
|
||||
const double ritzValue = x.dot(A_ * x);
|
||||
const double error = (A_ * x - ritzValue * x).norm();
|
||||
return error < tol;
|
||||
}
|
||||
|
||||
/// Return the number of iterations
|
||||
size_t nrIterations() const { return nrIterations_; }
|
||||
|
||||
/**
|
||||
* Start the power/accelerated iteration. After each iteration, calculate
* the Ritz error and repeat until it converges.
* Return true if converged, otherwise false.
|
||||
*/
|
||||
bool compute(size_t maxIterations, double tol) {
|
||||
// Starting
|
||||
bool isConverged = false;
|
||||
|
||||
for (size_t i = 0; i < maxIterations && !isConverged; i++) {
|
||||
++nrIterations_;
|
||||
// update the ritzVector after power iteration
|
||||
ritzVector_ = powerIteration();
|
||||
// update the ritzValue
|
||||
ritzValue_ = ritzVector_.dot(A_ * ritzVector_);
|
||||
isConverged = converged(tol);
|
||||
}
|
||||
|
||||
return isConverged;
|
||||
}
|
||||
|
||||
/// Return the eigenvalue
|
||||
double eigenvalue() const { return ritzValue_; }
|
||||
|
||||
/// Return the eigenvector
|
||||
Vector eigenvector() const { return ritzVector_; }
|
||||
};
|
||||
|
||||
} // namespace gtsam
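For reference, the convergence test in `converged(tol)` above amounts to checking the Ritz residual of the current unit-norm Ritz vector x; in equation form (a restatement of the code, not an extra criterion):

```latex
\lambda_{\mathrm{ritz}} = x^{\top} A x, \qquad
r = \left\lVert A x - \lambda_{\mathrm{ritz}}\, x \right\rVert_2, \qquad
\text{converged} \iff r < \text{tol}
```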
|
|
@ -0,0 +1,63 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SparseEigen.h
|
||||
*
|
||||
* @brief Utilities for creating Eigen sparse matrices (gtsam::SparseEigen)
|
||||
*
|
||||
* @date Aug 2019
|
||||
* @author Mandy Xie
|
||||
* @author Fan Jiang
|
||||
* @author Gerry Chen
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
|
||||
#include <Eigen/Sparse>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/// Eigen-format sparse matrix. Note: ColMajor is ~20% faster since
|
||||
/// InnerIndices must be sorted
|
||||
typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SparseEigen;
|
||||
|
||||
/// Constructs an Eigen-format SparseMatrix of a GaussianFactorGraph
|
||||
SparseEigen sparseJacobianEigen(
|
||||
const GaussianFactorGraph &gfg, const Ordering &ordering) {
|
||||
gttic_(SparseEigen_sparseJacobianEigen);
|
||||
// intermediate `entries` vector is kind of unavoidable due to how expensive
|
||||
// factor->rows() is, which prevents us from populating SparseEigen directly.
|
||||
size_t nrows, ncols;
|
||||
auto entries = gfg.sparseJacobian(ordering, nrows, ncols);
|
||||
// declare sparse matrix
|
||||
SparseEigen Ab(nrows, ncols);
|
||||
// See Eigen::set_from_triplets. This is about 5% faster.
|
||||
// pass 1: count the nnz per inner-vector
|
||||
std::vector<int> nnz(ncols, 0);
|
||||
for (const auto &entry : entries) nnz[std::get<1>(entry)]++;
|
||||
Ab.reserve(nnz);
|
||||
// pass 2: insert the elements
|
||||
for (const auto &entry : entries)
|
||||
Ab.insert(std::get<0>(entry), std::get<1>(entry)) = std::get<2>(entry);
|
||||
return Ab;
|
||||
}
|
||||
|
||||
SparseEigen sparseJacobianEigen(const GaussianFactorGraph &gfg) {
|
||||
gttic_(SparseEigen_sparseJacobianEigen_defaultOrdering);
|
||||
return sparseJacobianEigen(gfg, Ordering(gfg.keys()));
|
||||
}
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,67 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* powerMethodExample.h
|
||||
*
|
||||
* @file powerMethodExample.h
|
||||
* @date Nov 2020
|
||||
* @author Jing Wu
|
||||
* @brief Create sparse and dense factor graph for
|
||||
* PowerMethod/AcceleratedPowerMethod
|
||||
*/
|
||||
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
|
||||
namespace gtsam {
|
||||
namespace linear {
|
||||
namespace test {
|
||||
namespace example {
|
||||
|
||||
/* ************************************************************************* */
|
||||
inline GaussianFactorGraph createSparseGraph() {
|
||||
using symbol_shorthand::X;
|
||||
// Let's make a scalar synchronization graph with 4 nodes
|
||||
GaussianFactorGraph fg;
|
||||
auto model = noiseModel::Unit::Create(1);
|
||||
for (size_t j = 0; j < 3; j++) {
|
||||
fg.add(X(j), -I_1x1, X(j + 1), I_1x1, Vector1::Zero(), model);
|
||||
}
|
||||
fg.add(X(3), -I_1x1, X(0), I_1x1, Vector1::Zero(), model); // extra row
|
||||
|
||||
return fg;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
inline GaussianFactorGraph createDenseGraph() {
|
||||
using symbol_shorthand::X;
|
||||
// Let's make a scalar synchronization graph with 10 nodes
|
||||
GaussianFactorGraph fg;
|
||||
auto model = noiseModel::Unit::Create(1);
|
||||
// Iterate over nodes
|
||||
for (size_t j = 0; j < 10; j++) {
|
||||
// Each node has an edge with all the others
|
||||
for (size_t i = 1; i < 10; i++)
|
||||
fg.add(X(j), -I_1x1, X((j + i) % 10), I_1x1, Vector1::Zero(), model);
|
||||
}
|
||||
|
||||
return fg;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
} // namespace example
|
||||
} // namespace test
|
||||
} // namespace linear
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,140 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* testPowerMethod.cpp
|
||||
*
|
||||
* @file testAcceleratedPowerMethod.cpp
|
||||
* @date Sept 2020
|
||||
* @author Jing Wu
|
||||
* @brief Check eigenvalue and eigenvector computed by accelerated power method
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/VectorSpace.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/linear/AcceleratedPowerMethod.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/tests/powerMethodExample.h>
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Dense>
|
||||
#include <Eigen/Eigenvalues>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(AcceleratedPowerMethod, acceleratedPowerIteration) {
|
||||
// test power iteration, beta is set to 0
|
||||
Sparse A(6, 6);
|
||||
A.coeffRef(0, 0) = 6;
|
||||
A.coeffRef(1, 1) = 5;
|
||||
A.coeffRef(2, 2) = 4;
|
||||
A.coeffRef(3, 3) = 3;
|
||||
A.coeffRef(4, 4) = 2;
|
||||
A.coeffRef(5, 5) = 1;
|
||||
Vector initial = (Vector(6) << 0.24434602, 0.22829942, 0.70094486, 0.15463092, 0.55871359,
|
||||
0.2465342).finished();
|
||||
const double ev1 = 6.0;
|
||||
|
||||
// test accelerated power iteration
|
||||
AcceleratedPowerMethod<Sparse> apf(A, initial);
|
||||
apf.compute(100, 1e-5);
|
||||
EXPECT_LONGS_EQUAL(6, apf.eigenvector().rows());
|
||||
|
||||
Vector6 actual1 = apf.eigenvector();
|
||||
const double ritzValue = actual1.dot(A * actual1);
|
||||
const double ritzResidual = (A * actual1 - ritzValue * actual1).norm();
|
||||
EXPECT_DOUBLES_EQUAL(0, ritzResidual, 1e-5);
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(ev1, apf.eigenvalue(), 1e-5);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(AcceleratedPowerMethod, useFactorGraphSparse) {
|
||||
// Let's make a scalar synchronization graph with 4 nodes
|
||||
GaussianFactorGraph fg = gtsam::linear::test::example::createSparseGraph();
|
||||
|
||||
// Get eigenvalues and eigenvectors with Eigen
|
||||
auto L = fg.hessian();
|
||||
Eigen::EigenSolver<Matrix> solver(L.first);
|
||||
|
||||
// find the index of the max eigenvalue
|
||||
size_t maxIdx = 0;
|
||||
for (auto i = 0; i < solver.eigenvalues().rows(); ++i) {
|
||||
if (solver.eigenvalues()(i).real() >= solver.eigenvalues()(maxIdx).real())
|
||||
maxIdx = i;
|
||||
}
|
||||
// Store the max eigenvalue and its corresponding eigenvector
|
||||
const auto ev1 = solver.eigenvalues()(maxIdx).real();
|
||||
|
||||
Vector disturb = Vector4::Random();
|
||||
disturb.normalize();
|
||||
Vector initial = L.first.row(0);
|
||||
double magnitude = initial.norm();
|
||||
initial += 0.03 * magnitude * disturb;
|
||||
AcceleratedPowerMethod<Matrix> apf(L.first, initial);
|
||||
apf.compute(100, 1e-5);
|
||||
// Check if the eigenvalue is the maximum eigen value
|
||||
EXPECT_DOUBLES_EQUAL(ev1, apf.eigenvalue(), 1e-8);
|
||||
|
||||
// Check if the corresponding Ritz residual converged to the threshold
|
||||
Vector actual1 = apf.eigenvector();
|
||||
const double ritzValue = actual1.dot(L.first * actual1);
|
||||
const double ritzResidual = (L.first * actual1 - ritzValue * actual1).norm();
|
||||
EXPECT_DOUBLES_EQUAL(0, ritzResidual, 1e-5);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(AcceleratedPowerMethod, useFactorGraphDense) {
|
||||
// Let's make a scalar synchronization graph with 10 nodes
|
||||
GaussianFactorGraph fg = gtsam::linear::test::example::createDenseGraph();
|
||||
|
||||
// Get eigenvalues and eigenvectors with Eigen
|
||||
auto L = fg.hessian();
|
||||
Eigen::EigenSolver<Matrix> solver(L.first);
|
||||
|
||||
// find the index of the max eigenvalue
|
||||
size_t maxIdx = 0;
|
||||
for (auto i = 0; i < solver.eigenvalues().rows(); ++i) {
|
||||
if (solver.eigenvalues()(i).real() >= solver.eigenvalues()(maxIdx).real())
|
||||
maxIdx = i;
|
||||
}
|
||||
// Store the max eigenvalue and its corresponding eigenvector
|
||||
const auto ev1 = solver.eigenvalues()(maxIdx).real();
|
||||
|
||||
Vector disturb = Vector10::Random();
|
||||
disturb.normalize();
|
||||
Vector initial = L.first.row(0);
|
||||
double magnitude = initial.norm();
|
||||
initial += 0.03 * magnitude * disturb;
|
||||
AcceleratedPowerMethod<Matrix> apf(L.first, initial);
|
||||
apf.compute(100, 1e-5);
|
||||
// Check if the eigenvalue is the maximum eigen value
|
||||
EXPECT_DOUBLES_EQUAL(ev1, apf.eigenvalue(), 1e-8);
|
||||
|
||||
// Check if the corresponding Ritz residual converged to the threshold
|
||||
Vector actual1 = apf.eigenvector();
|
||||
const double ritzValue = actual1.dot(L.first * actual1);
|
||||
const double ritzResidual = (L.first * actual1 - ritzValue * actual1).norm();
|
||||
EXPECT_DOUBLES_EQUAL(0, ritzResidual, 1e-5);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
|
@ -36,9 +36,18 @@ using namespace boost::assign;
|
|||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
// static SharedDiagonal
|
||||
// sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1), sigma_02 = noiseModel::Isotropic::Sigma(2,0.2),
|
||||
// constraintModel = noiseModel::Constrained::All(2);
|
||||
typedef std::tuple<size_t, size_t, double> SparseTriplet;
|
||||
bool triplet_equal(SparseTriplet a, SparseTriplet b) {
|
||||
if (get<0>(a) == get<0>(b) && get<1>(a) == get<1>(b) &&
|
||||
get<2>(a) == get<2>(b)) return true;
|
||||
|
||||
cout << "not equal:" << endl;
|
||||
cout << "\texpected: "
|
||||
"(" << get<0>(a) << ", " << get<1>(a) << ") = " << get<2>(a) << endl;
|
||||
cout << "\tactual: "
|
||||
"(" << get<0>(b) << ", " << get<1>(b) << ") = " << get<2>(b) << endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GaussianFactorGraph, initialization) {
|
||||
|
@ -57,10 +66,11 @@ TEST(GaussianFactorGraph, initialization) {
|
|||
// Test sparse, which takes a vector and returns a matrix, used in MATLAB
|
||||
// Note that this is the augmented Jacobian and the RHS is in column 7
|
||||
Matrix expectedIJS =
|
||||
(Matrix(3, 22) << 1., 2., 1., 2., 3., 4., 3., 4., 3., 4., 5., 6., 5., 6., 5., 6., 7., 8., 7.,
|
||||
8., 7., 8., 1., 2., 7., 7., 1., 2., 3., 4., 7., 7., 1., 2., 5., 6., 7., 7., 3., 4., 5., 6.,
|
||||
7., 7., 10., 10., -1., -1., -10., -10., 10., 10., 2., -1., -5., -5., 5., 5., 0., 1., -5.,
|
||||
-5., 5., 5., -1., 1.5).finished();
|
||||
(Matrix(3, 21) <<
|
||||
1., 2., 1., 2., 3., 4., 3., 4., 3., 4., 5., 6., 5., 6., 6., 7., 8., 7., 8., 7., 8.,
|
||||
1., 2., 7., 7., 1., 2., 3., 4., 7., 7., 1., 2., 5., 6., 7., 3., 4., 5., 6., 7., 7.,
|
||||
10., 10., -1., -1., -10., -10., 10., 10., 2., -1., -5., -5., 5., 5.,
|
||||
1., -5., -5., 5., 5., -1., 1.5).finished();
|
||||
Matrix actualIJS = fg.sparseJacobian_();
|
||||
EQUALITY(expectedIJS, actualIJS);
|
||||
}
|
||||
|
@ -74,8 +84,8 @@ TEST(GaussianFactorGraph, sparseJacobian) {
|
|||
// 9 10 0 11 12 13
|
||||
// 0 0 0 14 15 16
|
||||
|
||||
// Expected - NOTE that we transpose this!
|
||||
Matrix expectedT = (Matrix(16, 3) <<
|
||||
// Expected
|
||||
Matrix expected = (Matrix(16, 3) <<
|
||||
1., 1., 2.,
|
||||
1., 2., 4.,
|
||||
1., 3., 6.,
|
||||
|
@ -93,17 +103,32 @@ TEST(GaussianFactorGraph, sparseJacobian) {
|
|||
3., 6.,26.,
|
||||
4., 6.,32.).finished();
|
||||
|
||||
Matrix expected = expectedT.transpose();
|
||||
// expected: in matlab format - NOTE the transpose!
|
||||
Matrix expectedMatlab = expected.transpose();
|
||||
|
||||
GaussianFactorGraph gfg;
|
||||
SharedDiagonal model = noiseModel::Isotropic::Sigma(2, 0.5);
|
||||
gfg.add(0, (Matrix(2, 3) << 1., 2., 3., 5., 6., 7.).finished(), Vector2(4., 8.), model);
|
||||
gfg.add(0, (Matrix(2, 3) << 9., 10., 0., 0., 0., 0.).finished(), 1,
|
||||
(Matrix(2, 2) << 11., 12., 14., 15.).finished(), Vector2(13., 16.), model);
|
||||
const Key x123 = 0, x45 = 1;
|
||||
gfg.add(x123, (Matrix(2, 3) << 1, 2, 3, 5, 6, 7).finished(),
|
||||
Vector2(4, 8), model);
|
||||
gfg.add(x123, (Matrix(2, 3) << 9, 10, 0, 0, 0, 0).finished(),
|
||||
x45, (Matrix(2, 2) << 11, 12, 14, 15.).finished(),
|
||||
Vector2(13, 16), model);
|
||||
|
||||
Matrix actual = gfg.sparseJacobian_();
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
EXPECT(assert_equal(expectedMatlab, actual));
|
||||
|
||||
// SparseTriplets
|
||||
auto boostActual = gfg.sparseJacobian();
|
||||
// check the triplets size...
|
||||
EXPECT_LONGS_EQUAL(16, boostActual.size());
|
||||
// check content
|
||||
for (int i = 0; i < 16; i++) {
|
||||
EXPECT(triplet_equal(
|
||||
SparseTriplet(expected(i, 0) - 1, expected(i, 1) - 1, expected(i, 2)),
|
||||
boostActual.at(i)));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -0,0 +1,124 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* testPowerMethod.cpp
|
||||
*
|
||||
* @file testPowerMethod.cpp
|
||||
* @date Sept 2020
|
||||
* @author Jing Wu
|
||||
* @brief Check eigenvalue and eigenvector computed by power method
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/VectorSpace.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/PowerMethod.h>
|
||||
#include <gtsam/linear/tests/powerMethodExample.h>
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Dense>
|
||||
#include <Eigen/Eigenvalues>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(PowerMethod, powerIteration) {
|
||||
// test power iteration, beta is set to 0
|
||||
Sparse A(6, 6);
|
||||
A.coeffRef(0, 0) = 6;
|
||||
A.coeffRef(1, 1) = 5;
|
||||
A.coeffRef(2, 2) = 4;
|
||||
A.coeffRef(3, 3) = 3;
|
||||
A.coeffRef(4, 4) = 2;
|
||||
A.coeffRef(5, 5) = 1;
|
||||
Vector initial = (Vector(6) << 0.24434602, 0.22829942, 0.70094486, 0.15463092, 0.55871359,
|
||||
0.2465342).finished();
|
||||
PowerMethod<Sparse> pf(A, initial);
|
||||
pf.compute(100, 1e-5);
|
||||
EXPECT_LONGS_EQUAL(6, pf.eigenvector().rows());
|
||||
|
||||
Vector6 actual1 = pf.eigenvector();
|
||||
const double ritzValue = actual1.dot(A * actual1);
|
||||
const double ritzResidual = (A * actual1 - ritzValue * actual1).norm();
|
||||
EXPECT_DOUBLES_EQUAL(0, ritzResidual, 1e-5);
|
||||
|
||||
const double ev1 = 6.0;
|
||||
EXPECT_DOUBLES_EQUAL(ev1, pf.eigenvalue(), 1e-5);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(PowerMethod, useFactorGraphSparse) {
|
||||
// Let's make a scalar synchronization graph with 4 nodes
|
||||
GaussianFactorGraph fg = gtsam::linear::test::example::createSparseGraph();
|
||||
|
||||
// Get eigenvalues and eigenvectors with Eigen
|
||||
auto L = fg.hessian();
|
||||
Eigen::EigenSolver<Matrix> solver(L.first);
|
||||
|
||||
// find the index of the max eigenvalue
|
||||
size_t maxIdx = 0;
|
||||
for (auto i = 0; i < solver.eigenvalues().rows(); ++i) {
|
||||
if (solver.eigenvalues()(i).real() >= solver.eigenvalues()(maxIdx).real())
|
||||
maxIdx = i;
|
||||
}
|
||||
// Store the max eigenvalue and its corresponding eigenvector
|
||||
const auto ev1 = solver.eigenvalues()(maxIdx).real();
|
||||
|
||||
Vector initial = Vector4::Random();
|
||||
PowerMethod<Matrix> pf(L.first, initial);
|
||||
pf.compute(100, 1e-5);
|
||||
EXPECT_DOUBLES_EQUAL(ev1, pf.eigenvalue(), 1e-8);
|
||||
auto actual2 = pf.eigenvector();
|
||||
const double ritzValue = actual2.dot(L.first * actual2);
|
||||
const double ritzResidual = (L.first * actual2 - ritzValue * actual2).norm();
|
||||
EXPECT_DOUBLES_EQUAL(0, ritzResidual, 1e-5);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(PowerMethod, useFactorGraphDense) {
|
||||
// Let's make a scalar synchronization graph with 10 nodes
|
||||
GaussianFactorGraph fg = gtsam::linear::test::example::createDenseGraph();
|
||||
|
||||
// Get eigenvalues and eigenvectors with Eigen
|
||||
auto L = fg.hessian();
|
||||
Eigen::EigenSolver<Matrix> solver(L.first);
|
||||
|
||||
// find the index of the max eigenvalue
|
||||
size_t maxIdx = 0;
|
||||
for (auto i = 0; i < solver.eigenvalues().rows(); ++i) {
|
||||
if (solver.eigenvalues()(i).real() >= solver.eigenvalues()(maxIdx).real())
|
||||
maxIdx = i;
|
||||
}
|
||||
// Store the max eigenvalue and its corresponding eigenvector
|
||||
const auto ev1 = solver.eigenvalues()(maxIdx).real();
|
||||
|
||||
Vector initial = Vector10::Random();
|
||||
PowerMethod<Matrix> pf(L.first, initial);
|
||||
pf.compute(100, 1e-5);
|
||||
EXPECT_DOUBLES_EQUAL(ev1, pf.eigenvalue(), 1e-8);
|
||||
auto actual2 = pf.eigenvector();
|
||||
const double ritzValue = actual2.dot(L.first * actual2);
|
||||
const double ritzResidual = (L.first * actual2 - ritzValue * actual2).norm();
|
||||
EXPECT_DOUBLES_EQUAL(0, ritzResidual, 1e-5);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
|
@ -0,0 +1,72 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testSparseMatrix.cpp
|
||||
* @author Mandy Xie
|
||||
* @author Fan Jiang
|
||||
* @author Gerry Chen
|
||||
* @author Frank Dellaert
|
||||
* @date Jan, 2021
|
||||
*/
|
||||
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/SparseEigen.h>
|
||||
|
||||
#include <boost/assign/list_of.hpp>
|
||||
using boost::assign::list_of;
|
||||
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SparseEigen, sparseJacobianEigen) {
|
||||
GaussianFactorGraph gfg;
|
||||
SharedDiagonal model = noiseModel::Isotropic::Sigma(2, 0.5);
|
||||
const Key x123 = 0, x45 = 1;
|
||||
gfg.add(x123, (Matrix(2, 3) << 1, 2, 3, 5, 6, 7).finished(),
|
||||
Vector2(4, 8), model);
|
||||
gfg.add(x123, (Matrix(2, 3) << 9, 10, 0, 0, 0, 0).finished(),
|
||||
x45, (Matrix(2, 2) << 11, 12, 14, 15.).finished(),
|
||||
Vector2(13, 16), model);
|
||||
|
||||
// Sparse Matrix
|
||||
auto sparseResult = sparseJacobianEigen(gfg);
|
||||
EXPECT_LONGS_EQUAL(16, sparseResult.nonZeros());
|
||||
EXPECT(assert_equal(4, sparseResult.rows()));
|
||||
EXPECT(assert_equal(6, sparseResult.cols()));
|
||||
EXPECT(assert_equal(gfg.augmentedJacobian(), Matrix(sparseResult)));
|
||||
|
||||
// Call sparseJacobian with optional ordering...
|
||||
auto ordering = Ordering(list_of(x45)(x123));
|
||||
|
||||
// Eigen Sparse with optional ordering
|
||||
EXPECT(assert_equal(gfg.augmentedJacobian(ordering),
|
||||
Matrix(sparseJacobianEigen(gfg, ordering))));
|
||||
|
||||
// Check matrix dimensions when zero rows / cols
|
||||
gfg.add(x123, Matrix23::Zero(), Vector2::Zero(), model); // zero row
|
||||
gfg.add(2, Matrix21::Zero(), Vector2::Zero(), model); // zero col
|
||||
sparseResult = sparseJacobianEigen(gfg);
|
||||
EXPECT_LONGS_EQUAL(16, sparseResult.nonZeros());
|
||||
EXPECT(assert_equal(8, sparseResult.rows()));
|
||||
EXPECT(assert_equal(7, sparseResult.cols()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
|
@ -167,7 +167,7 @@ gtsam::NonlinearFactor::shared_ptr CombinedImuFactor::clone() const {
|
|||
//------------------------------------------------------------------------------
|
||||
void CombinedImuFactor::print(const string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
cout << (s == "" ? s : s + "\n") << "CombinedImuFactor("
|
||||
cout << (s.empty() ? s : s + "\n") << "CombinedImuFactor("
|
||||
<< keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ","
|
||||
<< keyFormatter(this->key3()) << "," << keyFormatter(this->key4()) << ","
|
||||
<< keyFormatter(this->key5()) << "," << keyFormatter(this->key6())
|
||||
|
|
|
@ -130,7 +130,7 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor& f) {
|
|||
|
||||
//------------------------------------------------------------------------------
|
||||
void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const {
|
||||
cout << (s == "" ? s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1())
|
||||
cout << (s.empty() ? s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1())
|
||||
<< "," << keyFormatter(this->key2()) << "," << keyFormatter(this->key3())
|
||||
<< "," << keyFormatter(this->key4()) << "," << keyFormatter(this->key5())
|
||||
<< ")\n";
|
||||
|
@ -226,7 +226,7 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor2& f) {
|
|||
//------------------------------------------------------------------------------
|
||||
void ImuFactor2::print(const string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
cout << (s == "" ? s : s + "\n") << "ImuFactor2("
|
||||
cout << (s.empty() ? s : s + "\n") << "ImuFactor2("
|
||||
<< keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ","
|
||||
<< keyFormatter(this->key3()) << ")\n";
|
||||
cout << *this << endl;
|
||||
|
|
|
@ -26,7 +26,7 @@ using namespace std;
|
|||
namespace gtsam {
|
||||
|
||||
void PreintegratedRotationParams::print(const string& s) const {
|
||||
cout << (s == "" ? s : s + "\n") << endl;
|
||||
cout << (s.empty() ? s : s + "\n") << endl;
|
||||
cout << "gyroscopeCovariance:\n[\n" << gyroscopeCovariance << "\n]" << endl;
|
||||
if (omegaCoriolis)
|
||||
cout << "omegaCoriolis = (" << omegaCoriolis->transpose() << ")" << endl;
|
||||
|
|
|
@ -46,7 +46,7 @@ ostream& operator<<(ostream& os, const PreintegrationBase& pim) {
|
|||
|
||||
//------------------------------------------------------------------------------
|
||||
void PreintegrationBase::print(const string& s) const {
|
||||
cout << (s == "" ? s : s + "\n") << *this << endl;
|
||||
cout << (s.empty() ? s : s + "\n") << *this << endl;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
|
|
@ -75,7 +75,7 @@ namespace gtsam {
|
|||
|
||||
/* ************************************************************************* */
|
||||
void Values::print(const string& str, const KeyFormatter& keyFormatter) const {
|
||||
cout << str << (str == "" ? "" : "\n");
|
||||
cout << str << (str.empty() ? "" : "\n");
|
||||
cout << "Values with " << size() << " values:\n";
|
||||
for(const_iterator key_value = begin(); key_value != end(); ++key_value) {
|
||||
cout << "Value " << keyFormatter(key_value->key) << ": ";
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
*/
|
||||
|
||||
#include <SymEigsSolver.h>
|
||||
#include <cmath>
|
||||
#include <gtsam/linear/PCGSolver.h>
|
||||
#include <gtsam/linear/SubgraphPreconditioner.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
|
@ -487,8 +488,88 @@ Sparse ShonanAveraging<d>::computeA(const Values &values) const {
|
|||
return Lambda - Q_;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <size_t d>
|
||||
Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
|
||||
auto Lambda = computeLambda(S);
|
||||
return Lambda - Q_;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Perturb initialVector by adding a spherically-uniformly distributed random
// vector of magnitude 0.03*||initialVector||_2 to it
|
||||
// ref : Part III. C, Rosen, D. and Carlone, L., 2017, September. Computational
|
||||
// enhancements for certifiably correct SLAM. In Proceedings of the
|
||||
// International Conference on Intelligent Robots and Systems.
|
||||
static Vector perturb(const Vector &initialVector) {
|
||||
// generate a random perturbation of magnitude 0.03*||x_0||_2 as stated in Rosen's paper
|
||||
int n = initialVector.rows();
|
||||
Vector disturb = Vector::Random(n);
|
||||
disturb.normalize();
|
||||
|
||||
double magnitude = initialVector.norm();
|
||||
Vector perturbedVector = initialVector + 0.03 * magnitude * disturb;
|
||||
perturbedVector.normalize();
|
||||
return perturbedVector;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/// MINIMUM EIGENVALUE COMPUTATIONS
|
||||
// Alg.6 from the paper "Distributed Certifiably Correct Pose-Graph Optimization".
// It takes the certificate matrix A as input; maxIterations and
// minEigenvalueNonnegativityTolerance default to 1000 and 10e-4 respectively.
// There are two parts to this algorithm:
// (1) compute the maximum eigenpair (\lambda_dom, \vect{v}_dom) of A by the
// power method; if \lambda_dom is less than zero, return that eigenpair.
// (2) compute the maximum eigenpair (\theta, \vect{v}) of C = \lambda_dom * I - A
// by the accelerated power method, and return (\lambda_dom - \theta, \vect{v}).
|
||||
static bool PowerMinimumEigenValue(
|
||||
const Sparse &A, const Matrix &S, double &minEigenValue,
|
||||
Vector *minEigenVector = 0, size_t *numIterations = 0,
|
||||
size_t maxIterations = 1000,
|
||||
double minEigenvalueNonnegativityTolerance = 10e-4) {
|
||||
|
||||
// a. Compute dominant eigenpair of S using power method
|
||||
PowerMethod<Sparse> pmOperator(A);
|
||||
|
||||
const bool pmConverged = pmOperator.compute(
|
||||
maxIterations, 1e-5);
|
||||
|
||||
// Check convergence and bail out if necessary
|
||||
if (!pmConverged) return false;
|
||||
|
||||
const double pmEigenValue = pmOperator.eigenvalue();
|
||||
|
||||
if (pmEigenValue < 0) {
|
||||
// The largest-magnitude eigenvalue is negative, and therefore also the
|
||||
// minimum eigenvalue, so just return this solution
|
||||
minEigenValue = pmEigenValue;
|
||||
if (minEigenVector) {
|
||||
*minEigenVector = pmOperator.eigenvector();
|
||||
minEigenVector->normalize(); // Ensure that this is a unit vector
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
const Sparse C = pmEigenValue * Matrix::Identity(A.rows(), A.cols()) - A;
|
||||
const boost::optional<Vector> initial = perturb(S.row(0));
|
||||
AcceleratedPowerMethod<Sparse> apmShiftedOperator(C, initial);
|
||||
|
||||
const bool minConverged = apmShiftedOperator.compute(
|
||||
maxIterations, minEigenvalueNonnegativityTolerance / pmEigenValue);
|
||||
|
||||
if (!minConverged) return false;
|
||||
|
||||
minEigenValue = pmEigenValue - apmShiftedOperator.eigenvalue();
|
||||
if (minEigenVector) {
|
||||
*minEigenVector = apmShiftedOperator.eigenvector();
|
||||
minEigenVector->normalize(); // Ensure that this is a unit vector
|
||||
}
|
||||
if (numIterations) *numIterations = apmShiftedOperator.nrIterations();
|
||||
return true;
|
||||
}
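Why step (2) of the comment above recovers the minimum eigenvalue: a short justification, assuming A is symmetric with eigenvalues lambda_1 >= ... >= lambda_n and the power method returned lambda_dom = lambda_1 >= 0.

```latex
% Spectral shift used by PowerMinimumEigenValue: C = \lambda_{\mathrm{dom}} I - A
% has eigenvalues \lambda_{\mathrm{dom}} - \lambda_i \ge 0, maximized at i = n, so
\theta = \lambda_{\max}(C) = \lambda_{\mathrm{dom}} - \lambda_{\min}(A)
\quad\Longrightarrow\quad
\lambda_{\min}(A) = \lambda_{\mathrm{dom}} - \theta
% and, since A and C share eigenvectors, the dominant eigenvector of C is an
% eigenvector of A for \lambda_{\min}(A).
```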
/** This is a lightweight struct used in conjunction with Spectra to compute
 * the minimum eigenvalue and eigenvector of a sparse matrix A; it has a single

@ -634,13 +715,6 @@ static bool SparseMinimumEigenValue(
  return true;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValue(const Values &values,

@ -658,6 +732,23 @@ double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValueAP(const Values &values,
                                                  Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue = 0;
  bool success = PowerMinimumEigenValue(A, S, minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "PowerMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
@ -26,6 +26,8 @@
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>
#include <gtsam/sfm/BinaryMeasurement.h>
#include <gtsam/linear/PowerMethod.h>
#include <gtsam/linear/AcceleratedPowerMethod.h>
#include <gtsam/slam/dataset.h>

#include <Eigen/Sparse>

@ -89,7 +91,8 @@ struct GTSAM_EXPORT ShonanAveragingParameters {
  bool getCertifyOptimality() const { return certifyOptimality; }

  /// Print the parameters and flags used for rotation averaging.
  void print() const {
  void print(const std::string &s = "") const {
    std::cout << (s.empty() ? s : s + " ");
    std::cout << " ShonanAveragingParameters: " << std::endl;
    std::cout << "   alpha: " << alpha << std::endl;
    std::cout << "   beta: " << beta << std::endl;
@ -250,6 +253,13 @@ class GTSAM_EXPORT ShonanAveraging {
  double computeMinEigenValue(const Values &values,
                              Vector *minEigenVector = nullptr) const;

  /**
   * Compute minimum eigenvalue with accelerated power method.
   * @param values: should be of type SOn
   */
  double computeMinEigenValueAP(const Values &values,
                                Vector *minEigenVector = nullptr) const;

  /// Project pxdN Stiefel manifold matrix S to Rot3^N
  Values roundSolutionS(const Matrix &S) const;
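As a hedged usage sketch (not from the PR): a small helper exercising only the two declarations above, comparing the existing minimum-eigenvalue path against the new accelerated-power-method path on the same candidate solution. `shonan` and `values` are assumed to be constructed elsewhere, and the function name is made up for illustration.

```cpp
#include <cmath>
#include <gtsam/sfm/ShonanAveraging.h>

// Illustrative only: check that both minimum-eigenvalue routines agree on a
// candidate solution whose Values contain SOn elements.
bool minEigenValuesAgree(const gtsam::ShonanAveraging3& shonan,
                         const gtsam::Values& values, double tol = 1e-6) {
  gtsam::Vector v1, v2;
  const double lambdaSpectra = shonan.computeMinEigenValue(values, &v1);
  const double lambdaAP = shonan.computeMinEigenValueAP(values, &v2);
  return std::abs(lambdaSpectra - lambdaAP) < tol;
}
```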
@ -188,9 +188,14 @@ TEST(ShonanAveraging3, CheckWithEigen) {
  for (int i = 1; i < lambdas.size(); i++)
    minEigenValue = min(lambdas(i), minEigenValue);

  // Compute the minimum eigenvalue with the accelerated power method
  double lambdaAP = kShonan.computeMinEigenValueAP(Qstar3);

  // Actual check
  EXPECT_DOUBLES_EQUAL(0, lambda, 1e-11);
  EXPECT_DOUBLES_EQUAL(0, minEigenValue, 1e-11);
  EXPECT_DOUBLES_EQUAL(0, lambdaAP, 1e-11);

  // Construct test descent direction (as minEigenVector is not predictable
  // across platforms, being one from a basically flat 3d subspace)
@ -113,9 +113,9 @@ class SmartStereoProjectionPoseFactor : public SmartStereoProjectionFactor {
   * print
   * @param s optional string naming the factor
   * @param keyFormatter optional formatter useful for printing Symbols
   */ void print(
      const std::string& s = "",
      const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override;
   */
  void print(const std::string& s = "", const KeyFormatter& keyFormatter =
                                            DefaultKeyFormatter) const override;

  /// equals
  bool equals(const NonlinearFactor& p, double tol = 1e-9) const override;

@ -155,4 +155,4 @@ template <>
struct traits<SmartStereoProjectionPoseFactor>
    : public Testable<SmartStereoProjectionPoseFactor> {};

} // \ namespace gtsam
} // namespace gtsam
@ -0,0 +1,35 @@
#pragma once

#include <tests/Measurement.h>

namespace gtsam {

/**
 * \brief Contains data from the IMU measurements.
 */
class ImuMeasurement : public Measurement {
 public:
  enum Name { BODY = 0, RF_FOOT = 1, RH_FOOT = 2 };

  Name name;               ///< Unique identifier for the measurement source
  Eigen::Vector3d I_a_WI;  ///< Raw acceleration from the IMU (m/s/s)
  Eigen::Vector3d I_w_WI;  ///< Raw angular velocity from the IMU (rad/s)

  ImuMeasurement()
      : Measurement("ImuMeasurement"), I_a_WI{0, 0, 0}, I_w_WI{0, 0, 0} {}

  virtual ~ImuMeasurement() override {}

  friend std::ostream& operator<<(std::ostream& stream,
                                  const ImuMeasurement& meas);
};

std::ostream& operator<<(std::ostream& stream, const ImuMeasurement& meas) {
  stream << "IMU Measurement at time = " << meas.time << " : \n"
         << "dt : " << meas.dt << "\n"
         << "I_a_WI: " << meas.I_a_WI.transpose() << "\n"
         << "I_w_WI: " << meas.I_w_WI.transpose() << "\n";
  return stream;
}

}  // namespace gtsam
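A small, illustrative sketch (not part of the PR) of how this measurement type might be populated and streamed; the numbers are made up, and the include path mirrors the one used by the test below.

```cpp
#include <iostream>
#include <tests/ImuMeasurement.h>

int main() {
  gtsam::ImuMeasurement m;
  m.name = gtsam::ImuMeasurement::BODY;
  m.time = 1000000000;          // receive time in nanoseconds (1 s)
  m.dt = 2500000;               // 2.5 ms between messages, i.e. a 400 Hz stream
  m.I_a_WI = {0.0, 0.0, 9.81};  // accelerometer reading (m/s/s), gravity only
  m.I_w_WI = {0.0, 0.0, 0.0};   // gyroscope reading (rad/s)
  std::cout << m;               // uses the operator<< defined above
  return 0;
}
```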
@ -0,0 +1,25 @@
#pragma once

#include <Eigen/Core>
#include <string>

namespace gtsam {

/**
 * \brief This is the base class for all measurement types.
 */
class Measurement {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  size_t dt;         ///< Time since the last message of this type (nanoseconds).
  size_t time;       ///< ROS time at which the message was received (nanoseconds).
  std::string type;  ///< The type of message (to enable dynamic/static casting).

  Measurement() : dt(0), time(0), type("UNDEFINED") {}
  Measurement(std::string _type) : dt(0), time(0), type(_type) {}

  virtual ~Measurement() {}
};

}  // namespace gtsam
@ -0,0 +1,127 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testImuPreintegration.cpp
 * @brief Unit tests for IMU Preintegration
 * @author Russell Buchanan
 **/

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/slam/dataset.h>
#include <tests/ImuMeasurement.h>

#include <fstream>
#include <iostream>

using namespace std;
using namespace gtsam;

/* ************************************************************************* */
/**
 * \brief Uses the GTSAM library to perform IMU preintegration on an
 * acceleration input.
 */
TEST(TestImuPreintegration, LoadedSimulationData) {
  Vector3 finalPos(0, 0, 0);

  vector<ImuMeasurement> imuMeasurements;

  double accNoiseSigma = 0.001249;
  double accBiasRwSigma = 0.000106;
  double gyrNoiseSigma = 0.000208;
  double gyrBiasRwSigma = 0.000004;
  double integrationCovariance = 1e-8;
  double biasAccOmegaInt = 1e-5;

  double gravity = 9.81;
  double rate = 400.0;  // Hz

  string inFileString = findExampleDataFile("quadraped_imu_data.csv");
  ifstream inputFile(inFileString);
  string line;
  while (getline(inputFile, line)) {
    stringstream ss(line);
    string str;
    vector<double> results;
    while (getline(ss, str, ',')) {
      results.push_back(atof(str.c_str()));
    }
    ImuMeasurement measurement;
    measurement.dt = static_cast<size_t>(1e9 * (1 / rate));
    measurement.time = results[2];
    measurement.I_a_WI = {results[29], results[30], results[31]};
    measurement.I_w_WI = {results[17], results[18], results[19]};
    imuMeasurements.push_back(measurement);
  }

  // Assume a Z-up navigation (assuming we are performing optimization in the
  // IMU frame).
  auto imuPreintegratedParams =
      PreintegratedCombinedMeasurements::Params::MakeSharedU(gravity);
  imuPreintegratedParams->accelerometerCovariance =
      I_3x3 * pow(accNoiseSigma, 2);
  imuPreintegratedParams->biasAccCovariance = I_3x3 * pow(accBiasRwSigma, 2);
  imuPreintegratedParams->gyroscopeCovariance = I_3x3 * pow(gyrNoiseSigma, 2);
  imuPreintegratedParams->biasOmegaCovariance = I_3x3 * pow(gyrBiasRwSigma, 2);
  imuPreintegratedParams->integrationCovariance = I_3x3 * integrationCovariance;
  imuPreintegratedParams->biasAccOmegaInt = I_6x6 * biasAccOmegaInt;

  // Initial state
  Pose3 priorPose;
  Vector3 priorVelocity(0, 0, 0);
  imuBias::ConstantBias priorImuBias;
  PreintegratedCombinedMeasurements imuPreintegrated;
  Vector3 position(0, 0, 0);
  Vector3 velocity(0, 0, 0);
  NavState propState;

  NavState initialNavState(priorPose, priorVelocity);

  // Assume zero bias for simulated data
  priorImuBias =
      imuBias::ConstantBias(Eigen::Vector3d(0, 0, 0), Eigen::Vector3d(0, 0, 0));

  imuPreintegrated =
      PreintegratedCombinedMeasurements(imuPreintegratedParams, priorImuBias);

  // start at 1 to skip header
  for (size_t n = 1; n < imuMeasurements.size(); n++) {
    // integrate
    imuPreintegrated.integrateMeasurement(imuMeasurements[n].I_a_WI,
                                          imuMeasurements[n].I_w_WI, 1 / rate);
    // predict
    propState = imuPreintegrated.predict(initialNavState, priorImuBias);
    position = propState.pose().translation();
    velocity = propState.velocity();
  }

  Vector3 rotation = propState.pose().rotation().rpy();

  // Don't have ground truth for x and y position yet
  // DOUBLES_EQUAL(0.1, position[0], 1e-2);
  // DOUBLES_EQUAL(0.1, position[1], 1e-2);
  DOUBLES_EQUAL(0.0, position[2], 1e-2);

  DOUBLES_EQUAL(0.0, rotation[0], 1e-2);
  DOUBLES_EQUAL(0.0, rotation[1], 1e-2);
  DOUBLES_EQUAL(0.0, rotation[2], 1e-2);
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */