Merge branch 'develop' into fix/windows-tests

commit a9d3a10032
@@ -0,0 +1,33 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file Basis.cpp
 * @brief Compute an interpolating basis
 * @author Varun Agrawal
 * @date June 20, 2023
 */

#include <gtsam/basis/Basis.h>

namespace gtsam {

Matrix kroneckerProductIdentity(size_t M, const Weights& w) {
  Matrix result(M, w.cols() * M);
  result.setZero();

  for (int i = 0; i < w.cols(); i++) {
    result.block(0, i * M, M, M).diagonal().array() = w(i);
  }
  return result;
}

} // namespace gtsam
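For reference, the helper added above assembles the Kronecker product of the 1xN weight row with the MxM identity,

\[
\mathrm{kroneckerProductIdentity}(M, w) \;=\; w \otimes I_M \;=\;
\begin{bmatrix} w_0 I_M & w_1 I_M & \cdots & w_{N-1} I_M \end{bmatrix}
\;\in\; \mathbb{R}^{M \times MN},
\]

which is the Jacobian of the evaluation \(P\,w^\top\) with respect to the column-stacked MxN parameter matrix \(P\), since \(P\,w^\top = (w \otimes I_M)\,\mathrm{vec}(P)\). The vector-valued functors in Basis.h below cache this matrix as H_.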
@@ -20,7 +20,6 @@
#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/basis/ParameterMatrix.h>

#include <iostream>

@@ -81,16 +80,7 @@ using Weights = Eigen::Matrix<double, 1, -1>; /* 1xN vector */
 *
 * @ingroup basis
 */
template <size_t M>
Matrix kroneckerProductIdentity(const Weights& w) {
  Matrix result(M, w.cols() * M);
  result.setZero();

  for (int i = 0; i < w.cols(); i++) {
    result.block(0, i * M, M, M).diagonal().array() = w(i);
  }
  return result;
}
Matrix kroneckerProductIdentity(size_t M, const Weights& w);
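A minimal sketch of calling the new runtime-M overload declared above (the weight values and M = 2 are illustrative, not taken from the patch):

#include <gtsam/basis/Basis.h>

int main() {
  // 1x3 weight row; values are illustrative only.
  gtsam::Weights w(3);
  w << 0.2, 0.5, 0.3;

  // Runtime-M API from this patch: returns the 2 x 6 matrix
  // [w(0)*I2  w(1)*I2  w(2)*I2].
  gtsam::Matrix H = gtsam::kroneckerProductIdentity(2, w);
  return (H.rows() == 2 && H.cols() == 6) ? 0 : 1;
}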
/**
 * CRTP Base class for function bases

@@ -169,18 +159,18 @@ class Basis {
};

/**
 * VectorEvaluationFunctor at a given x, applied to ParameterMatrix<M>.
 * VectorEvaluationFunctor at a given x, applied to a parameter Matrix.
 * This functor is used to evaluate a parameterized function at a given scalar
 * value x. When given specific M*N parameters, returns an M-vector of the M
 * corresponding functions at x, possibly with Jacobians w.r.t. the parameters.
 */
template <int M>
class VectorEvaluationFunctor : protected EvaluationFunctor {
 protected:
  using VectorM = Eigen::Matrix<double, M, 1>;
  using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
  using Jacobian = Eigen::Matrix<double, /*MxMN*/ -1, -1>;
  Jacobian H_;

  size_t M_;

  /**
   * Calculate the `M*(M*N)` Jacobian of this functor with respect to
   * the M*N parameter matrix `P`.

@@ -190,7 +180,7 @@ class Basis {
   * i.e., the Kronecker product of weights_ with the MxM identity matrix.
   */
  void calculateJacobian() {
    H_ = kroneckerProductIdentity<M>(this->weights_);
    H_ = kroneckerProductIdentity(M_, this->weights_);
  }

 public:

@@ -200,25 +190,26 @@ class Basis {
  VectorEvaluationFunctor() {}

  /// Default Constructor
  VectorEvaluationFunctor(size_t N, double x) : EvaluationFunctor(N, x) {
  VectorEvaluationFunctor(size_t M, size_t N, double x)
      : EvaluationFunctor(N, x), M_(M) {
    calculateJacobian();
  }

  /// Constructor, with interval [a,b]
  VectorEvaluationFunctor(size_t N, double x, double a, double b)
      : EvaluationFunctor(N, x, a, b) {
  VectorEvaluationFunctor(size_t M, size_t N, double x, double a, double b)
      : EvaluationFunctor(N, x, a, b), M_(M) {
    calculateJacobian();
  }

  /// M-dimensional evaluation
  VectorM apply(const ParameterMatrix<M>& P,
  Vector apply(const Matrix& P,
               OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
    if (H) *H = H_;
    return P.matrix() * this->weights_.transpose();
  }

  /// c++ sugar
  VectorM operator()(const ParameterMatrix<M>& P,
  Vector operator()(const Matrix& P,
                    OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
    return apply(P, H);
  }
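A short usage sketch of the reworked functor, with the output dimension M now a run-time constructor argument and the parameters a plain Matrix (the sizes and evaluation point are illustrative):

#include <gtsam/basis/Chebyshev2.h>

int main() {
  const size_t M = 3, N = 5;              // illustrative sizes
  const double x = 0.25, a = 0.0, b = 1.0;

  // Previously: Chebyshev2::VectorEvaluationFunctor<3> fx(N, x, a, b);
  gtsam::Chebyshev2::VectorEvaluationFunctor fx(M, N, x, a, b);

  // Parameters are an M x N gtsam::Matrix instead of ParameterMatrix<M>.
  gtsam::Matrix P = gtsam::Matrix::Zero(M, N);
  gtsam::Matrix H(M, M * N);              // optional Jacobian, M x (M*N)
  gtsam::Vector v = fx(P, H);             // M-vector evaluated at x
  return (v.size() == 3) ? 0 : 1;
}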
@@ -231,13 +222,14 @@ class Basis {
 *
 * This component is specified by the row index i, with 0 <= i < M.
 */
template <int M>
class VectorComponentFunctor : public EvaluationFunctor {
 protected:
  using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
  size_t rowIndex_;
  Jacobian H_;

  size_t M_;
  size_t rowIndex_;

  /*
   * Calculate the `1*(M*N)` Jacobian of this functor with respect to
   * the M*N parameter matrix `P`.

@@ -248,9 +240,9 @@ class Basis {
   * MxM identity matrix. See also VectorEvaluationFunctor.
   */
  void calculateJacobian(size_t N) {
    H_.setZero(1, M * N);
    H_.setZero(1, M_ * N);
    for (int j = 0; j < EvaluationFunctor::weights_.size(); j++)
      H_(0, rowIndex_ + j * M) = EvaluationFunctor::weights_(j);
      H_(0, rowIndex_ + j * M_) = EvaluationFunctor::weights_(j);
  }

 public:

@@ -258,33 +250,34 @@ class Basis {
  VectorComponentFunctor() {}

  /// Construct with row index
  VectorComponentFunctor(size_t N, size_t i, double x)
      : EvaluationFunctor(N, x), rowIndex_(i) {
  VectorComponentFunctor(size_t M, size_t N, size_t i, double x)
      : EvaluationFunctor(N, x), M_(M), rowIndex_(i) {
    calculateJacobian(N);
  }

  /// Construct with row index and interval
  VectorComponentFunctor(size_t N, size_t i, double x, double a, double b)
      : EvaluationFunctor(N, x, a, b), rowIndex_(i) {
  VectorComponentFunctor(size_t M, size_t N, size_t i, double x, double a,
                         double b)
      : EvaluationFunctor(N, x, a, b), M_(M), rowIndex_(i) {
    calculateJacobian(N);
  }

  /// Calculate component rowIndex_ of P
  double apply(const ParameterMatrix<M>& P,
  double apply(const Matrix& P,
               OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
    if (H) *H = H_;
    return P.row(rowIndex_) * EvaluationFunctor::weights_.transpose();
  }

  /// c++ sugar
  double operator()(const ParameterMatrix<M>& P,
  double operator()(const Matrix& P,
                    OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
    return apply(P, H);
  }
};

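For the component functor above, the 1 x (M*N) Jacobian filled in by calculateJacobian simply selects row i = rowIndex_ of the parameters:

\[
H \;=\; w \otimes e_i^\top \;=\;
\begin{bmatrix} w_0\,e_i^\top & w_1\,e_i^\top & \cdots & w_{N-1}\,e_i^\top \end{bmatrix}
\;\in\; \mathbb{R}^{1 \times MN},
\qquad H_{0,\;i + jM} = w_j,
\]

where \(e_i\) is the i-th standard basis vector of \(\mathbb{R}^M\), so that \(P_{i,:}\,w^\top = H\,\mathrm{vec}(P)\).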
/**
 * Manifold EvaluationFunctor at a given x, applied to ParameterMatrix<M>.
 * Manifold EvaluationFunctor at a given x, applied to a parameter Matrix.
 * This functor is used to evaluate a parameterized function at a given scalar
 * value x. When given specific M*N parameters, returns an M-vector of the M
 * corresponding functions at x, possibly with Jacobians w.r.t. the parameters.

@@ -297,25 +290,23 @@ class Basis {
 * 3D rotation.
 */
template <class T>
class ManifoldEvaluationFunctor
    : public VectorEvaluationFunctor<traits<T>::dimension> {
class ManifoldEvaluationFunctor : public VectorEvaluationFunctor {
  enum { M = traits<T>::dimension };
  using Base = VectorEvaluationFunctor<M>;
  using Base = VectorEvaluationFunctor;

 public:
  /// For serialization
  ManifoldEvaluationFunctor() {}

  /// Default Constructor
  ManifoldEvaluationFunctor(size_t N, double x) : Base(N, x) {}
  ManifoldEvaluationFunctor(size_t N, double x) : Base(M, N, x) {}

  /// Constructor, with interval [a,b]
  ManifoldEvaluationFunctor(size_t N, double x, double a, double b)
      : Base(N, x, a, b) {}
      : Base(M, N, x, a, b) {}

  /// Manifold evaluation
  T apply(const ParameterMatrix<M>& P,
          OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
  T apply(const Matrix& P, OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
    // Interpolate the M-dimensional vector to yield a vector in tangent space
    Eigen::Matrix<double, M, 1> xi = Base::operator()(P, H);

@@ -333,7 +324,7 @@ class Basis {
  }

  /// c++ sugar
  T operator()(const ParameterMatrix<M>& P,
  T operator()(const Matrix& P,
               OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
    return apply(P, H);  // might call apply in derived
  }
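A usage sketch for the manifold functor above, mirroring the Pose2 test further down in this patch (N and the interval are illustrative; 3 is dim(Pose2)):

#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/geometry/Pose2.h>

int main() {
  const size_t N = 4;                    // illustrative number of basis points
  const double t = 0.5, a = 0.0, b = 1.0;

  // Rows hold tangent-space coordinates (x, y, theta) at the basis points.
  gtsam::Matrix X = gtsam::Matrix::Zero(3, N);

  // M = traits<Pose2>::dimension is deduced from T, so this constructor
  // signature is unchanged by the patch.
  gtsam::Chebyshev2::ManifoldEvaluationFunctor<gtsam::Pose2> fx(N, t, a, b);
  gtsam::Pose2 pose = fx(X);             // interpolated pose (identity here)
  return pose.equals(gtsam::Pose2()) ? 0 : 1;
}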
@@ -389,20 +380,20 @@ class Basis {
};

/**
 * VectorDerivativeFunctor at a given x, applied to ParameterMatrix<M>.
 * VectorDerivativeFunctor at a given x, applied to a parameter Matrix.
 *
 * This functor is used to evaluate the derivatives of a parameterized
 * function at a given scalar value x. When given specific M*N parameters,
 * returns an M-vector of the M corresponding function derivatives at x,
 * possibly with Jacobians w.r.t. the parameters.
 */
template <int M>
class VectorDerivativeFunctor : protected DerivativeFunctorBase {
 protected:
  using VectorM = Eigen::Matrix<double, M, 1>;
  using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
  using Jacobian = Eigen::Matrix<double, /*MxMN*/ -1, -1>;
  Jacobian H_;

  size_t M_;

  /**
   * Calculate the `M*(M*N)` Jacobian of this functor with respect to
   * the M*N parameter matrix `P`.

@@ -412,7 +403,7 @@ class Basis {
   * i.e., the Kronecker product of weights_ with the MxM identity matrix.
   */
  void calculateJacobian() {
    H_ = kroneckerProductIdentity<M>(this->weights_);
    H_ = kroneckerProductIdentity(M_, this->weights_);
  }

 public:

@@ -422,24 +413,24 @@ class Basis {
  VectorDerivativeFunctor() {}

  /// Default Constructor
  VectorDerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {
  VectorDerivativeFunctor(size_t M, size_t N, double x)
      : DerivativeFunctorBase(N, x), M_(M) {
    calculateJacobian();
  }

  /// Constructor, with optional interval [a,b]
  VectorDerivativeFunctor(size_t N, double x, double a, double b)
      : DerivativeFunctorBase(N, x, a, b) {
  VectorDerivativeFunctor(size_t M, size_t N, double x, double a, double b)
      : DerivativeFunctorBase(N, x, a, b), M_(M) {
    calculateJacobian();
  }

  VectorM apply(const ParameterMatrix<M>& P,
  Vector apply(const Matrix& P,
               OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
    if (H) *H = H_;
    return P.matrix() * this->weights_.transpose();
  }
  /// c++ sugar
  VectorM operator()(
      const ParameterMatrix<M>& P,
  Vector operator()(const Matrix& P,
                    OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
    return apply(P, H);
  }

@@ -452,13 +443,14 @@ class Basis {
 *
 * This component is specified by the row index i, with 0 <= i < M.
 */
template <int M>
class ComponentDerivativeFunctor : protected DerivativeFunctorBase {
 protected:
  using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
  size_t rowIndex_;
  Jacobian H_;

  size_t M_;
  size_t rowIndex_;

  /*
   * Calculate the `1*(M*N)` Jacobian of this functor with respect to
   * the M*N parameter matrix `P`.

@@ -469,9 +461,9 @@ class Basis {
   * MxM identity matrix. See also VectorDerivativeFunctor.
   */
  void calculateJacobian(size_t N) {
    H_.setZero(1, M * N);
    H_.setZero(1, M_ * N);
    for (int j = 0; j < this->weights_.size(); j++)
      H_(0, rowIndex_ + j * M) = this->weights_(j);
      H_(0, rowIndex_ + j * M_) = this->weights_(j);
  }

 public:

@@ -479,29 +471,29 @@ class Basis {
  ComponentDerivativeFunctor() {}

  /// Construct with row index
  ComponentDerivativeFunctor(size_t N, size_t i, double x)
      : DerivativeFunctorBase(N, x), rowIndex_(i) {
  ComponentDerivativeFunctor(size_t M, size_t N, size_t i, double x)
      : DerivativeFunctorBase(N, x), M_(M), rowIndex_(i) {
    calculateJacobian(N);
  }

  /// Construct with row index and interval
  ComponentDerivativeFunctor(size_t N, size_t i, double x, double a, double b)
      : DerivativeFunctorBase(N, x, a, b), rowIndex_(i) {
  ComponentDerivativeFunctor(size_t M, size_t N, size_t i, double x, double a,
                             double b)
      : DerivativeFunctorBase(N, x, a, b), M_(M), rowIndex_(i) {
    calculateJacobian(N);
  }
  /// Calculate derivative of component rowIndex_ of F
  double apply(const ParameterMatrix<M>& P,
  double apply(const Matrix& P,
               OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
    if (H) *H = H_;
    return P.row(rowIndex_) * this->weights_.transpose();
  }
  /// c++ sugar
  double operator()(const ParameterMatrix<M>& P,
  double operator()(const Matrix& P,
                    OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
    return apply(P, H);
  }
};

};

} // namespace gtsam

@@ -75,7 +75,7 @@ class EvaluationFactor : public FunctorizedFactor<double, Vector> {
};

/**
 * Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
 * Unary factor for enforcing BASIS polynomial evaluation on a parameter Matrix
 * of size (M, N) is equal to a vector-valued measurement at the same point, when
 * using a pseudo-spectral parameterization.

@@ -87,15 +87,13 @@ class EvaluationFactor : public FunctorizedFactor<double, Vector> {
 * measurement prediction function.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param M: Size of the evaluated state vector.
 *
 * @ingroup basis
 */
template <class BASIS, int M>
class VectorEvaluationFactor
    : public FunctorizedFactor<Vector, ParameterMatrix<M>> {
template <class BASIS>
class VectorEvaluationFactor : public FunctorizedFactor<Vector, Matrix> {
 private:
  using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
  using Base = FunctorizedFactor<Vector, Matrix>;

 public:
  VectorEvaluationFactor() {}

@@ -103,42 +101,43 @@ class VectorEvaluationFactor
  /**
   * @brief Construct a new VectorEvaluationFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * @param key The key to the parameter Matrix object used to represent the
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param M Size of the evaluated state vector.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorEvaluationFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x)
      : Base(key, z, model,
             typename BASIS::template VectorEvaluationFunctor<M>(N, x)) {}
                         const SharedNoiseModel &model, const size_t M,
                         const size_t N, double x)
      : Base(key, z, model, typename BASIS::VectorEvaluationFunctor(M, N, x)) {}

  /**
   * @brief Construct a new VectorEvaluationFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * @param key The key to the parameter Matrix object used to represent the
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param M Size of the evaluated state vector.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorEvaluationFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x, double a, double b)
                         const SharedNoiseModel &model, const size_t M,
                         const size_t N, double x, double a, double b)
      : Base(key, z, model,
             typename BASIS::template VectorEvaluationFunctor<M>(N, x, a, b)) {}
             typename BASIS::VectorEvaluationFunctor(M, N, x, a, b)) {}

  virtual ~VectorEvaluationFactor() {}
};
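A sketch of the updated factor construction: the vector size M is now a run-time argument and the unknown is a plain gtsam::Matrix in Values (the key, sizes, and zero measurement are illustrative):

#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

int main() {
  const size_t M = 3, N = 4;                       // illustrative sizes
  const gtsam::Key key = gtsam::Symbol('x', 0);
  const gtsam::Vector z = gtsam::Vector::Zero(M);
  auto model = gtsam::noiseModel::Isotropic::Sigma(M, 1.0);

  // Previously: VectorEvaluationFactor<Chebyshev2, 3> factor(key, z, model, N, 0.5);
  gtsam::VectorEvaluationFactor<gtsam::Chebyshev2> factor(key, z, model, M, N, 0.5);

  gtsam::NonlinearFactorGraph graph;
  graph.add(factor);

  // The unknown is stored as a plain M x N Matrix.
  gtsam::Values values;
  values.insert<gtsam::Matrix>(key, gtsam::Matrix::Zero(M, N));
  return (graph.error(values) >= 0.0) ? 0 : 1;
}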
/**
 * Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
 * Unary factor for enforcing BASIS polynomial evaluation on a parameter Matrix
 * of size (P, N) is equal to a specified measurement at the same point, when
 * using a pseudo-spectral parameterization.
 *

@@ -147,20 +146,18 @@ class VectorEvaluationFactor
 * indexed by `i`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param P: Size of the fixed-size vector.
 *
 * Example:
 *   VectorComponentFactor<BASIS, P> controlPrior(key, measured, model,
 *   VectorComponentFactor<BASIS> controlPrior(key, measured, model,
 *                                             N, i, t, a, b);
 *  where N is the degree and i is the component index.
 *
 * @ingroup basis
 */
template <class BASIS, size_t P>
class VectorComponentFactor
    : public FunctorizedFactor<double, ParameterMatrix<P>> {
template <class BASIS>
class VectorComponentFactor : public FunctorizedFactor<double, Matrix> {
 private:
  using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
  using Base = FunctorizedFactor<double, Matrix>;

 public:
  VectorComponentFactor() {}

@@ -168,29 +165,31 @@ class VectorComponentFactor
  /**
   * @brief Construct a new VectorComponentFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * @param key The key to the parameter Matrix object used to represent the
   * polynomial.
   * @param z The scalar value at a specified index `i` of the full measurement
   * vector.
   * @param model The noise model.
   * @param P Size of the fixed-size vector.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
                        const size_t N, size_t i, double x)
                        const size_t P, const size_t N, size_t i, double x)
      : Base(key, z, model,
             typename BASIS::template VectorComponentFunctor<P>(N, i, x)) {}
             typename BASIS::VectorComponentFunctor(P, N, i, x)) {}

  /**
   * @brief Construct a new VectorComponentFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * @param key The key to the parameter Matrix object used to represent the
   * polynomial.
   * @param z The scalar value at a specified index `i` of the full measurement
   * vector.
   * @param model The noise model.
   * @param P Size of the fixed-size vector.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.

@@ -199,11 +198,10 @@ class VectorComponentFactor
   * @param b Upper bound for the polynomial.
   */
  VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
                        const size_t N, size_t i, double x, double a, double b)
      : Base(
            key, z, model,
            typename BASIS::template VectorComponentFunctor<P>(N, i, x, a, b)) {
  }
                        const size_t P, const size_t N, size_t i, double x,
                        double a, double b)
      : Base(key, z, model,
             typename BASIS::VectorComponentFunctor(P, N, i, x, a, b)) {}

  virtual ~VectorComponentFactor() {}
};

@@ -226,10 +224,9 @@ class VectorComponentFactor
 * where `x` is the value (e.g. timestep) at which the rotation was evaluated.
 */
template <class BASIS, typename T>
class ManifoldEvaluationFactor
    : public FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>> {
class ManifoldEvaluationFactor : public FunctorizedFactor<T, Matrix> {
 private:
  using Base = FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>>;
  using Base = FunctorizedFactor<T, Matrix>;

 public:
  ManifoldEvaluationFactor() {}

@@ -289,7 +286,7 @@ class DerivativeFactor
  /**
   * @brief Construct a new DerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * @param key The key to the parameter Matrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.

@@ -303,7 +300,7 @@ class DerivativeFactor
  /**
   * @brief Construct a new DerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * @param key The key to the parameter Matrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.

@@ -324,14 +321,12 @@ class DerivativeFactor
 * polynomial at a specified point `x` is equal to the vector value `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param M: Size of the evaluated state vector derivative.
 */
template <class BASIS, int M>
class VectorDerivativeFactor
    : public FunctorizedFactor<Vector, ParameterMatrix<M>> {
template <class BASIS>
class VectorDerivativeFactor : public FunctorizedFactor<Vector, Matrix> {
 private:
  using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
  using Func = typename BASIS::template VectorDerivativeFunctor<M>;
  using Base = FunctorizedFactor<Vector, Matrix>;
  using Func = typename BASIS::VectorDerivativeFunctor;

 public:
  VectorDerivativeFactor() {}

@@ -339,34 +334,36 @@ class VectorDerivativeFactor
  /**
   * @brief Construct a new VectorDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * @param key The key to the parameter Matrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param M Size of the evaluated state vector derivative.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorDerivativeFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x)
      : Base(key, z, model, Func(N, x)) {}
                         const SharedNoiseModel &model, const size_t M,
                         const size_t N, double x)
      : Base(key, z, model, Func(M, N, x)) {}

  /**
   * @brief Construct a new VectorDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * @param key The key to the parameter Matrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param M Size of the evaluated state vector derivative.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorDerivativeFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x, double a, double b)
      : Base(key, z, model, Func(N, x, a, b)) {}
                         const SharedNoiseModel &model, const size_t M,
                         const size_t N, double x, double a, double b)
      : Base(key, z, model, Func(M, N, x, a, b)) {}

  virtual ~VectorDerivativeFactor() {}
};
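The derivative prior follows the same pattern; a brief sketch under the same assumptions (illustrative sizes, zero measurement):

#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

void addZeroVelocityPrior(gtsam::NonlinearFactorGraph* graph, gtsam::Key key) {
  const size_t M = 3, N = 4;  // illustrative sizes
  auto model = gtsam::noiseModel::Isotropic::Sigma(M, 0.1);
  // Constrain the M-dimensional derivative to zero at x = 0,
  // using the new (M, N, x) argument ordering.
  gtsam::VectorDerivativeFactor<gtsam::Chebyshev2> prior(
      key, gtsam::Vector::Zero(M), model, M, N, 0.0);
  graph->add(prior);
}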
@@ -377,14 +374,12 @@ class VectorDerivativeFactor
 * vector-valued measurement `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param P: Size of the control component derivative.
 */
template <class BASIS, int P>
class ComponentDerivativeFactor
    : public FunctorizedFactor<double, ParameterMatrix<P>> {
template <class BASIS>
class ComponentDerivativeFactor : public FunctorizedFactor<double, Matrix> {
 private:
  using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
  using Func = typename BASIS::template ComponentDerivativeFunctor<P>;
  using Base = FunctorizedFactor<double, Matrix>;
  using Func = typename BASIS::ComponentDerivativeFunctor;

 public:
  ComponentDerivativeFactor() {}

@@ -392,29 +387,31 @@ class ComponentDerivativeFactor
  /**
   * @brief Construct a new ComponentDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * @param key The key to the parameter Matrix which represents the basis
   * polynomial.
   * @param z The scalar measurement value at a specific index `i` of the full
   * measurement vector.
   * @param model The noise model.
   * @param P: Size of the control component derivative.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   */
  ComponentDerivativeFactor(Key key, const double &z,
                            const SharedNoiseModel &model, const size_t N,
                            size_t i, double x)
      : Base(key, z, model, Func(N, i, x)) {}
                            const SharedNoiseModel &model, const size_t P,
                            const size_t N, size_t i, double x)
      : Base(key, z, model, Func(P, N, i, x)) {}

  /**
   * @brief Construct a new ComponentDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * @param key The key to the parameter Matrix which represents the basis
   * polynomial.
   * @param z The scalar measurement value at a specific index `i` of the full
   * measurement vector.
   * @param model The noise model.
   * @param P: Size of the control component derivative.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.

@@ -423,9 +420,10 @@ class ComponentDerivativeFactor
   * @param b Upper bound for the polynomial.
   */
  ComponentDerivativeFactor(Key key, const double &z,
                            const SharedNoiseModel &model, const size_t N,
                            size_t i, double x, double a, double b)
      : Base(key, z, model, Func(N, i, x, a, b)) {}
                            const SharedNoiseModel &model, const size_t P,
                            const size_t N, size_t i, double x, double a,
                            double b)
      : Base(key, z, model, Func(P, N, i, x, a, b)) {}

  virtual ~ComponentDerivativeFactor() {}
};

@@ -1,215 +0,0 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file ParameterMatrix.h
 * @brief Define ParameterMatrix class which is used to store values at
 * interpolation points.
 * @author Varun Agrawal, Frank Dellaert
 * @date September 21, 2020
 */

#pragma once

#include <gtsam/base/Matrix.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/VectorSpace.h>

#include <iostream>

namespace gtsam {

/**
 * A matrix abstraction of MxN values at the Basis points.
 * This class serves as a wrapper over an Eigen matrix.
 * @tparam M: The dimension of the type you wish to evaluate.
 * @param N: the number of Basis points (e.g. Chebyshev points of the second
 * kind).
 */
template <int M>
class ParameterMatrix {
  using MatrixType = Eigen::Matrix<double, M, -1>;

 private:
  MatrixType matrix_;

 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  enum { dimension = Eigen::Dynamic };

  /**
   * Create ParameterMatrix using the number of basis points.
   * @param N: The number of basis points (the columns).
   */
  ParameterMatrix(const size_t N) : matrix_(M, N) { matrix_.setZero(); }

  /**
   * Create ParameterMatrix from an MxN Eigen Matrix.
   * @param matrix: An Eigen matrix used to initialize the ParameterMatrix.
   */
  ParameterMatrix(const MatrixType& matrix) : matrix_(matrix) {}

  /// Get the number of rows.
  size_t rows() const { return matrix_.rows(); }

  /// Get the number of columns.
  size_t cols() const { return matrix_.cols(); }

  /// Get the underlying matrix.
  MatrixType matrix() const { return matrix_; }

  /// Return the transpose of the underlying matrix.
  Eigen::Matrix<double, -1, M> transpose() const { return matrix_.transpose(); }

  /**
   * Get the matrix row specified by `index`.
   * @param index: The row index to retrieve.
   */
  Eigen::Matrix<double, 1, -1> row(size_t index) const {
    return matrix_.row(index);
  }

  /**
   * Set the matrix row specified by `index`.
   * @param index: The row index to set.
   */
  auto row(size_t index) -> Eigen::Block<MatrixType, 1, -1, false> {
    return matrix_.row(index);
  }

  /**
   * Get the matrix column specified by `index`.
   * @param index: The column index to retrieve.
   */
  Eigen::Matrix<double, M, 1> col(size_t index) const {
    return matrix_.col(index);
  }

  /**
   * Set the matrix column specified by `index`.
   * @param index: The column index to set.
   */
  auto col(size_t index) -> Eigen::Block<MatrixType, M, 1, true> {
    return matrix_.col(index);
  }

  /**
   * Set all matrix coefficients to zero.
   */
  void setZero() { matrix_.setZero(); }

  /**
   * Add a ParameterMatrix to another.
   * @param other: ParameterMatrix to add.
   */
  ParameterMatrix<M> operator+(const ParameterMatrix<M>& other) const {
    return ParameterMatrix<M>(matrix_ + other.matrix());
  }

  /**
   * Add a MxN-sized vector to the ParameterMatrix.
   * @param other: Vector which is reshaped and added.
   */
  ParameterMatrix<M> operator+(
      const Eigen::Matrix<double, -1, 1>& other) const {
    // This form avoids a deep copy and instead typecasts `other`.
    Eigen::Map<const MatrixType> other_(other.data(), M, cols());
    return ParameterMatrix<M>(matrix_ + other_);
  }

  /**
   * Subtract a ParameterMatrix from another.
   * @param other: ParameterMatrix to subtract.
   */
  ParameterMatrix<M> operator-(const ParameterMatrix<M>& other) const {
    return ParameterMatrix<M>(matrix_ - other.matrix());
  }

  /**
   * Subtract a MxN-sized vector from the ParameterMatrix.
   * @param other: Vector which is reshaped and subtracted.
   */
  ParameterMatrix<M> operator-(
      const Eigen::Matrix<double, -1, 1>& other) const {
    Eigen::Map<const MatrixType> other_(other.data(), M, cols());
    return ParameterMatrix<M>(matrix_ - other_);
  }

  /**
   * Multiply ParameterMatrix with an Eigen matrix.
   * @param other: Eigen matrix which should be multiplication compatible with
   * the ParameterMatrix.
   */
  MatrixType operator*(const Eigen::Matrix<double, -1, -1>& other) const {
    return matrix_ * other;
  }

  /// @name Vector Space requirements
  /// @{

  /**
   * Print the ParameterMatrix.
   * @param s: The prepend string to add more contextual info.
   */
  void print(const std::string& s = "") const {
    std::cout << (s == "" ? s : s + " ") << matrix_ << std::endl;
  }

  /**
   * Check for equality up to absolute tolerance.
   * @param other: The ParameterMatrix to check equality with.
   * @param tol: The absolute tolerance threshold.
   */
  bool equals(const ParameterMatrix<M>& other, double tol = 1e-8) const {
    return gtsam::equal_with_abs_tol(matrix_, other.matrix(), tol);
  }

  /// Returns dimensionality of the tangent space
  inline size_t dim() const { return matrix_.size(); }

  /// Convert to vector form, is done row-wise
  inline Vector vector() const {
    using RowMajor = Eigen::Matrix<double, -1, -1, Eigen::RowMajor>;
    Vector result(matrix_.size());
    Eigen::Map<RowMajor>(&result(0), rows(), cols()) = matrix_;
    return result;
  }

  /** Identity function to satisfy VectorSpace traits.
   *
   * NOTE: The size at compile time is unknown so this identity is zero
   * length and thus not valid.
   */
  inline static ParameterMatrix Identity() {
    // throw std::runtime_error(
    //     "ParameterMatrix::Identity(): Don't use this function");
    return ParameterMatrix(0);
  }

  /// @}
};

// traits for ParameterMatrix
template <int M>
struct traits<ParameterMatrix<M>>
    : public internal::VectorSpace<ParameterMatrix<M>> {};

/* ************************************************************************* */
// Stream operator that takes a ParameterMatrix. Used for printing.
template <int M>
inline std::ostream& operator<<(std::ostream& os,
                                const ParameterMatrix<M>& parameterMatrix) {
  os << parameterMatrix.matrix();
  return os;
}

} // namespace gtsam
@@ -46,18 +46,6 @@ class Chebyshev2 {
  static Matrix DifferentiationMatrix(size_t N, double a, double b);
};

#include <gtsam/basis/ParameterMatrix.h>

template <M = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}>
class ParameterMatrix {
  ParameterMatrix(const size_t N);
  ParameterMatrix(const Matrix& matrix);

  Matrix matrix() const;

  void print(const string& s = "") const;
};

#include <gtsam/basis/BasisFactors.h>

template <BASIS = {gtsam::Chebyshev2, gtsam::Chebyshev1Basis,

@@ -72,45 +60,36 @@ virtual class EvaluationFactor : gtsam::NoiseModelFactor {
                   double x, double a, double b);
};

template <BASIS, M>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor {
  VectorEvaluationFactor();
  VectorEvaluationFactor(gtsam::Key key, const Vector& z,
                         const gtsam::noiseModel::Base* model, const size_t N,
                         double x);
                         const gtsam::noiseModel::Base* model, const size_t M,
                         const size_t N, double x);
  VectorEvaluationFactor(gtsam::Key key, const Vector& z,
                         const gtsam::noiseModel::Base* model, const size_t N,
                         double x, double a, double b);
                         const gtsam::noiseModel::Base* model, const size_t M,
                         const size_t N, double x, double a, double b);
};

// TODO(Varun) Better way to support arbitrary dimensions?
// Especially if users mainly do `pip install gtsam` for the Python wrapper.
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 3>
    VectorEvaluationFactorChebyshev2D3;
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 4>
    VectorEvaluationFactorChebyshev2D4;
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 12>
    VectorEvaluationFactorChebyshev2D12;

template <BASIS, P>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class VectorComponentFactor : gtsam::NoiseModelFactor {
  VectorComponentFactor();
  VectorComponentFactor(gtsam::Key key, const double z,
                        const gtsam::noiseModel::Base* model, const size_t N,
                        size_t i, double x);
                        const gtsam::noiseModel::Base* model, const size_t M,
                        const size_t N, size_t i, double x);
  VectorComponentFactor(gtsam::Key key, const double z,
                        const gtsam::noiseModel::Base* model, const size_t N,
                        size_t i, double x, double a, double b);
                        const gtsam::noiseModel::Base* model, const size_t M,
                        const size_t N, size_t i, double x, double a, double b);
};

typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 3>
    VectorComponentFactorChebyshev2D3;
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 4>
    VectorComponentFactorChebyshev2D4;
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 12>
    VectorComponentFactorChebyshev2D12;
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Pose3.h>

template <BASIS, T>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::Chebyshev2},
          T = {gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3}>
virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor {
  ManifoldEvaluationFactor();
  ManifoldEvaluationFactor(gtsam::Key key, const T& z,

@@ -121,8 +100,42 @@ virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor {
                           double x, double a, double b);
};

// TODO(gerry): Add `DerivativeFactor`, `VectorDerivativeFactor`, and
// `ComponentDerivativeFactor`
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class DerivativeFactor : gtsam::NoiseModelFactor {
  DerivativeFactor();
  DerivativeFactor(gtsam::Key key, const double z,
                   const gtsam::noiseModel::Base* model, const size_t N,
                   double x);
  DerivativeFactor(gtsam::Key key, const double z,
                   const gtsam::noiseModel::Base* model, const size_t N,
                   double x, double a, double b);
};

template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class VectorDerivativeFactor : gtsam::NoiseModelFactor {
  VectorDerivativeFactor();
  VectorDerivativeFactor(gtsam::Key key, const Vector& z,
                         const gtsam::noiseModel::Base* model, const size_t M,
                         const size_t N, double x);
  VectorDerivativeFactor(gtsam::Key key, const Vector& z,
                         const gtsam::noiseModel::Base* model, const size_t M,
                         const size_t N, double x, double a, double b);
};

template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class ComponentDerivativeFactor : gtsam::NoiseModelFactor {
  ComponentDerivativeFactor();
  ComponentDerivativeFactor(gtsam::Key key, const double z,
                            const gtsam::noiseModel::Base* model,
                            const size_t P, const size_t N, size_t i, double x);
  ComponentDerivativeFactor(gtsam::Key key, const double z,
                            const gtsam::noiseModel::Base* model,
                            const size_t P, const size_t N, size_t i, double x,
                            double a, double b);
};

#include <gtsam/basis/FitBasis.h>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
@@ -17,30 +17,28 @@
 * @brief unit tests for factors in BasisFactors.h
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/Vector.h>
#include <gtsam/basis/Basis.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/Vector.h>

#include <CppUnitLite/TestHarness.h>

using gtsam::noiseModel::Isotropic;
using gtsam::Pose2;
using gtsam::Vector;
using gtsam::Values;
using gtsam::Chebyshev2;
using gtsam::ParameterMatrix;
using gtsam::LevenbergMarquardtParams;
using gtsam::LevenbergMarquardtOptimizer;
using gtsam::LevenbergMarquardtParams;
using gtsam::NonlinearFactorGraph;
using gtsam::NonlinearOptimizerParams;
using gtsam::Pose2;
using gtsam::Values;
using gtsam::Vector;
using gtsam::noiseModel::Isotropic;

constexpr size_t N = 2;

@@ -81,15 +79,15 @@ TEST(BasisFactors, VectorEvaluationFactor) {
  const Vector measured = Vector::Zero(M);

  auto model = Isotropic::Sigma(M, 1.0);
  VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);
  VectorEvaluationFactor<Chebyshev2> factor(key, measured, model, M, N, 0);

  NonlinearFactorGraph graph;
  graph.add(factor);

  ParameterMatrix<M> stateMatrix(N);
  gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N);

  Values initial;
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);
  initial.insert<gtsam::Matrix>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);

@@ -107,7 +105,7 @@ TEST(BasisFactors, Print) {
  const Vector measured = Vector::Ones(M) * 42;

  auto model = Isotropic::Sigma(M, 1.0);
  VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);
  VectorEvaluationFactor<Chebyshev2> factor(key, measured, model, M, N, 0);

  std::string expected =
      " keys = { X0 }\n"

@@ -128,16 +126,16 @@ TEST(BasisFactors, VectorComponentFactor) {
  const size_t i = 2;
  const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0;
  auto model = Isotropic::Sigma(1, 1.0);
  VectorComponentFactor<Chebyshev2, P> factor(key, measured, model, N, i,
                                              t, a, b);
  VectorComponentFactor<Chebyshev2> factor(key, measured, model, P, N, i, t, a,
                                           b);

  NonlinearFactorGraph graph;
  graph.add(factor);

  ParameterMatrix<P> stateMatrix(N);
  gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(P, N);

  Values initial;
  initial.insert<ParameterMatrix<P>>(key, stateMatrix);
  initial.insert<gtsam::Matrix>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);

@@ -153,16 +151,16 @@ TEST(BasisFactors, ManifoldEvaluationFactor) {
  const Pose2 measured;
  const double t = 3.0, a = 2.0, b = 4.0;
  auto model = Isotropic::Sigma(3, 1.0);
  ManifoldEvaluationFactor<Chebyshev2, Pose2> factor(key, measured, model, N,
                                                     t, a, b);
  ManifoldEvaluationFactor<Chebyshev2, Pose2> factor(key, measured, model, N, t,
                                                     a, b);

  NonlinearFactorGraph graph;
  graph.add(factor);

  ParameterMatrix<3> stateMatrix(N);
  gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(3, N);

  Values initial;
  initial.insert<ParameterMatrix<3>>(key, stateMatrix);
  initial.insert<gtsam::Matrix>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);

@@ -170,6 +168,8 @@ TEST(BasisFactors, ManifoldEvaluationFactor) {
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
  // Check Jacobians
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, initial, 1e-7, 1e-5);
}

//******************************************************************************

@@ -179,15 +179,15 @@ TEST(BasisFactors, VecDerivativePrior) {

  const Vector measured = Vector::Zero(M);
  auto model = Isotropic::Sigma(M, 1.0);
  VectorDerivativeFactor<Chebyshev2, M> vecDPrior(key, measured, model, N, 0);
  VectorDerivativeFactor<Chebyshev2> vecDPrior(key, measured, model, M, N, 0);

  NonlinearFactorGraph graph;
  graph.add(vecDPrior);

  ParameterMatrix<M> stateMatrix(N);
  gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N);

  Values initial;
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);
  initial.insert<gtsam::Matrix>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);

@@ -204,15 +204,15 @@ TEST(BasisFactors, ComponentDerivativeFactor) {

  double measured = 0;
  auto model = Isotropic::Sigma(1, 1.0);
  ComponentDerivativeFactor<Chebyshev2, M> controlDPrior(key, measured, model,
  ComponentDerivativeFactor<Chebyshev2> controlDPrior(key, measured, model, M,
                                                      N, 0, 0);

  NonlinearFactorGraph graph;
  graph.add(controlDPrior);

  Values initial;
  ParameterMatrix<M> stateMatrix(N);
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);
  gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N);
  initial.insert<gtsam::Matrix>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);
@@ -17,14 +17,15 @@
 * methods
 */

#include <cstddef>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/base/Testable.h>

#include <CppUnitLite/TestHarness.h>
#include <cstddef>
#include <functional>

using namespace std;

@@ -107,7 +108,7 @@ TEST(Chebyshev2, InterpolateVector) {
  double t = 30, a = 0, b = 100;
  const size_t N = 3;
  // Create 2x3 matrix with Vectors at Chebyshev points
  ParameterMatrix<2> X(N);
  Matrix X = Matrix::Zero(2, N);
  X.row(0) = Chebyshev2::Points(N, a, b);  // slope 1 ramp

  // Check value

@@ -115,14 +116,15 @@ TEST(Chebyshev2, InterpolateVector) {
  expected << t, 0;
  Eigen::Matrix<double, /*2x2N*/ -1, -1> actualH(2, 2 * N);

  Chebyshev2::VectorEvaluationFunctor<2> fx(N, t, a, b);
  Chebyshev2::VectorEvaluationFunctor fx(2, N, t, a, b);
  EXPECT(assert_equal(expected, fx(X, actualH), 1e-9));

  // Check derivative
  std::function<Vector2(ParameterMatrix<2>)> f = std::bind(
      &Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, std::placeholders::_1, nullptr);
  std::function<Vector2(Matrix)> f =
      std::bind(&Chebyshev2::VectorEvaluationFunctor::operator(), fx,
                std::placeholders::_1, nullptr);
  Matrix numericalH =
      numericalDerivative11<Vector2, ParameterMatrix<2>, 2 * N>(f, X);
      numericalDerivative11<Vector2, Matrix, 2 * N>(f, X);
  EXPECT(assert_equal(numericalH, actualH, 1e-9));
}

@@ -131,19 +133,60 @@ TEST(Chebyshev2, InterpolateVector) {
TEST(Chebyshev2, InterpolatePose2) {
  double t = 30, a = 0, b = 100;

  ParameterMatrix<3> X(N);
  Matrix X(3, N);
  X.row(0) = Chebyshev2::Points(N, a, b);  // slope 1 ramp
  X.row(1) = Vector::Zero(N);
  X.row(2) = 0.1 * Vector::Ones(N);

  Vector xi(3);
  xi << t, 0, 0.1;
  Eigen::Matrix<double, /*3x3N*/ -1, -1> actualH(3, 3 * N);

  Chebyshev2::ManifoldEvaluationFunctor<Pose2> fx(N, t, a, b);
  // We use xi as canonical coordinates via exponential map
  Pose2 expected = Pose2::ChartAtOrigin::Retract(xi);
  EXPECT(assert_equal(expected, fx(X)));
  EXPECT(assert_equal(expected, fx(X, actualH)));

  // Check derivative
  std::function<Pose2(Matrix)> f =
      std::bind(&Chebyshev2::ManifoldEvaluationFunctor<Pose2>::operator(), fx,
                std::placeholders::_1, nullptr);
  Matrix numericalH =
      numericalDerivative11<Pose2, Matrix, 3 * N>(f, X);
  EXPECT(assert_equal(numericalH, actualH, 1e-9));
}

#ifdef GTSAM_POSE3_EXPMAP
//******************************************************************************
// Interpolating poses using the exponential map
TEST(Chebyshev2, InterpolatePose3) {
  double a = 10, b = 100;
  double t = Chebyshev2::Points(N, a, b)(11);

  Rot3 R = Rot3::Ypr(-2.21366492e-05, -9.35353636e-03, -5.90463598e-04);
  Pose3 pose(R, Point3(1, 2, 3));

  Vector6 xi = Pose3::ChartAtOrigin::Local(pose);
  Eigen::Matrix<double, /*6x6N*/ -1, -1> actualH(6, 6 * N);

  Matrix X = Matrix::Zero(6, N);
  X.col(11) = xi;

  Chebyshev2::ManifoldEvaluationFunctor<Pose3> fx(N, t, a, b);
  // We use xi as canonical coordinates via exponential map
  Pose3 expected = Pose3::ChartAtOrigin::Retract(xi);
  EXPECT(assert_equal(expected, fx(X, actualH)));

  // Check derivative
  std::function<Pose3(Matrix)> f =
      std::bind(&Chebyshev2::ManifoldEvaluationFunctor<Pose3>::operator(), fx,
                std::placeholders::_1, nullptr);
  Matrix numericalH =
      numericalDerivative11<Pose3, Matrix, 6 * N>(f, X);
  EXPECT(assert_equal(numericalH, actualH, 1e-8));
}
#endif

//******************************************************************************
TEST(Chebyshev2, Decomposition) {
  // Create example sequence

@@ -370,14 +413,14 @@ TEST(Chebyshev2, Derivative6_03) {
TEST(Chebyshev2, VectorDerivativeFunctor) {
  const size_t N = 3, M = 2;
  const double x = 0.2;
  using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
  VecD fx(N, x, 0, 3);
  ParameterMatrix<M> X(N);
  using VecD = Chebyshev2::VectorDerivativeFunctor;
  VecD fx(M, N, x, 0, 3);
  Matrix X = Matrix::Zero(M, N);
  Matrix actualH(M, M * N);
  EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8));

  // Test Jacobian
  Matrix expectedH = numericalDerivative11<Vector2, ParameterMatrix<M>, M * N>(
  Matrix expectedH = numericalDerivative11<Vector2, Matrix, M * N>(
      std::bind(&VecD::operator(), fx, std::placeholders::_1, nullptr), X);
  EXPECT(assert_equal(expectedH, actualH, 1e-7));
}

@@ -386,30 +429,29 @@ TEST(Chebyshev2, VectorDerivativeFunctor) {
// Test VectorDerivativeFunctor with polynomial function
TEST(Chebyshev2, VectorDerivativeFunctor2) {
  const size_t N = 64, M = 1, T = 15;
  using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
  using VecD = Chebyshev2::VectorDerivativeFunctor;

  const Vector points = Chebyshev2::Points(N, 0, T);

  // Assign the parameter matrix
  Vector values(N);
  // Assign the parameter matrix 1xN
  Matrix X(1, N);
  for (size_t i = 0; i < N; ++i) {
    values(i) = f(points(i));
    X(i) = f(points(i));
  }
  ParameterMatrix<M> X(values);

  // Evaluate the derivative at the chebyshev points using
  // VectorDerivativeFunctor.
  for (size_t i = 0; i < N; ++i) {
    VecD d(N, points(i), 0, T);
    VecD d(M, N, points(i), 0, T);
    Vector1 Dx = d(X);
    EXPECT_DOUBLES_EQUAL(fprime(points(i)), Dx(0), 1e-6);
  }

  // Test Jacobian at the first chebyshev point.
  Matrix actualH(M, M * N);
  VecD vecd(N, points(0), 0, T);
  VecD vecd(M, N, points(0), 0, T);
  vecd(X, actualH);
  Matrix expectedH = numericalDerivative11<Vector1, ParameterMatrix<M>, M * N>(
  Matrix expectedH = numericalDerivative11<Vector1, Matrix, M * N>(
      std::bind(&VecD::operator(), vecd, std::placeholders::_1, nullptr), X);
  EXPECT(assert_equal(expectedH, actualH, 1e-6));
}

@@ -419,14 +461,14 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) {
TEST(Chebyshev2, ComponentDerivativeFunctor) {
  const size_t N = 6, M = 2;
  const double x = 0.2;
  using CompFunc = Chebyshev2::ComponentDerivativeFunctor<M>;
  using CompFunc = Chebyshev2::ComponentDerivativeFunctor;
  size_t row = 1;
  CompFunc fx(N, row, x, 0, 3);
  ParameterMatrix<M> X(N);
  CompFunc fx(M, N, row, x, 0, 3);
  Matrix X = Matrix::Zero(M, N);
  Matrix actualH(1, M * N);
  EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8);

  Matrix expectedH = numericalDerivative11<double, ParameterMatrix<M>, M * N>(
  Matrix expectedH = numericalDerivative11<double, Matrix, M * N>(
      std::bind(&CompFunc::operator(), fx, std::placeholders::_1, nullptr), X);
  EXPECT(assert_equal(expectedH, actualH, 1e-7));
}
@@ -180,17 +180,16 @@ TEST(Basis, Derivative7) {

//******************************************************************************
TEST(Basis, VecDerivativeFunctor) {
  using DotShape = typename FourierBasis::VectorDerivativeFunctor<2>;
  using DotShape = typename FourierBasis::VectorDerivativeFunctor;
  const size_t N = 3;

  // MATLAB example, Dec 27 2019, commit 014eded5
  double h = 2 * M_PI / 16;
  Vector2 dotShape(0.5556, -0.8315);  // at h/2
  DotShape dotShapeFunction(N, h / 2);
  Matrix23 theta_mat = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071)
  DotShape dotShapeFunction(2, N, h / 2);
  Matrix theta = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071)
                     .finished()
                     .transpose();
  ParameterMatrix<2> theta(theta_mat);
  EXPECT(assert_equal(Vector(dotShape), dotShapeFunction(theta), 1e-4));
}

@@ -1,145 +0,0 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testParameterMatrix.cpp
 * @date Sep 22, 2020
 * @author Varun Agrawal, Frank Dellaert
 * @brief Unit tests for ParameterMatrix.h
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <gtsam/inference/Symbol.h>

using namespace std;
using namespace gtsam;

using Parameters = Chebyshev2::Parameters;

const size_t M = 2, N = 5;

//******************************************************************************
TEST(ParameterMatrix, Constructor) {
  ParameterMatrix<2> actual1(3);
  ParameterMatrix<2> expected1(Matrix::Zero(2, 3));
  EXPECT(assert_equal(expected1, actual1));

  ParameterMatrix<2> actual2(Matrix::Ones(2, 3));
  ParameterMatrix<2> expected2(Matrix::Ones(2, 3));
  EXPECT(assert_equal(expected2, actual2));
  EXPECT(assert_equal(expected2.matrix(), actual2.matrix()));
}

//******************************************************************************
TEST(ParameterMatrix, Dimensions) {
  ParameterMatrix<M> params(N);
  EXPECT_LONGS_EQUAL(params.rows(), M);
  EXPECT_LONGS_EQUAL(params.cols(), N);
  EXPECT_LONGS_EQUAL(params.dim(), M * N);
}

//******************************************************************************
TEST(ParameterMatrix, Getters) {
  ParameterMatrix<M> params(N);

  Matrix expectedMatrix = Matrix::Zero(2, 5);
  EXPECT(assert_equal(expectedMatrix, params.matrix()));

  Matrix expectedMatrixTranspose = Matrix::Zero(5, 2);
  EXPECT(assert_equal(expectedMatrixTranspose, params.transpose()));

  ParameterMatrix<M> p2(Matrix::Ones(M, N));
  Vector expectedRowVector = Vector::Ones(N);
  for (size_t r = 0; r < M; ++r) {
    EXPECT(assert_equal(p2.row(r), expectedRowVector));
  }

  ParameterMatrix<M> p3(2 * Matrix::Ones(M, N));
  Vector expectedColVector = 2 * Vector::Ones(M);
  for (size_t c = 0; c < M; ++c) {
    EXPECT(assert_equal(p3.col(c), expectedColVector));
  }
}

//******************************************************************************
TEST(ParameterMatrix, Setters) {
  ParameterMatrix<M> params(Matrix::Zero(M, N));
  Matrix expected = Matrix::Zero(M, N);

  // row
  params.row(0) = Vector::Ones(N);
  expected.row(0) = Vector::Ones(N);
  EXPECT(assert_equal(expected, params.matrix()));

  // col
  params.col(2) = Vector::Ones(M);
  expected.col(2) = Vector::Ones(M);

  EXPECT(assert_equal(expected, params.matrix()));

  // setZero
  params.setZero();
  expected.setZero();
  EXPECT(assert_equal(expected, params.matrix()));
}

//******************************************************************************
TEST(ParameterMatrix, Addition) {
  ParameterMatrix<M> params(Matrix::Ones(M, N));
  ParameterMatrix<M> expected(2 * Matrix::Ones(M, N));

  // Add vector
  EXPECT(assert_equal(expected, params + Vector::Ones(M * N)));
  // Add another ParameterMatrix
  ParameterMatrix<M> actual = params + ParameterMatrix<M>(Matrix::Ones(M, N));
  EXPECT(assert_equal(expected, actual));
}

//******************************************************************************
TEST(ParameterMatrix, Subtraction) {
  ParameterMatrix<M> params(2 * Matrix::Ones(M, N));
  ParameterMatrix<M> expected(Matrix::Ones(M, N));

  // Subtract vector
  EXPECT(assert_equal(expected, params - Vector::Ones(M * N)));
  // Subtract another ParameterMatrix
  ParameterMatrix<M> actual = params - ParameterMatrix<M>(Matrix::Ones(M, N));
  EXPECT(assert_equal(expected, actual));
}

//******************************************************************************
TEST(ParameterMatrix, Multiplication) {
  ParameterMatrix<M> params(Matrix::Ones(M, N));
  Matrix multiplier = 2 * Matrix::Ones(N, 2);
  Matrix expected = 2 * N * Matrix::Ones(M, 2);
  EXPECT(assert_equal(expected, params * multiplier));
}

//******************************************************************************
|
||||
TEST(ParameterMatrix, VectorSpace) {
|
||||
ParameterMatrix<M> params(Matrix::Ones(M, N));
|
||||
// vector
|
||||
EXPECT(assert_equal(Vector::Ones(M * N), params.vector()));
|
||||
// identity
|
||||
EXPECT(assert_equal(ParameterMatrix<M>::Identity(),
|
||||
ParameterMatrix<M>(Matrix::Zero(M, 0))));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
//******************************************************************************
|
|
@ -21,7 +21,6 @@
|
|||
#include <gtsam/discrete/TableFactor.h>
|
||||
#include <gtsam/hybrid/HybridValues.h>
|
||||
|
||||
#include <boost/format.hpp>
|
||||
#include <utility>
|
||||
|
||||
using namespace std;
|
||||
|
@ -203,7 +202,7 @@ void TableFactor::print(const string& s, const KeyFormatter& formatter) const {
|
|||
cout << s;
|
||||
cout << " f[";
|
||||
for (auto&& key : keys())
|
||||
cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key);
|
||||
cout << " (" << formatter(key) << "," << cardinality(key) << "),";
|
||||
cout << " ]" << endl;
|
||||
for (SparseIt it(sparse_table_); it; ++it) {
|
||||
DiscreteValues assignment = findAssignments(it.index());
|
||||
|
|
|
@ -46,7 +46,9 @@ public:
|
|||
uL_(0), uR_(0), v_(0) {
|
||||
}
|
||||
|
||||
/** constructor */
|
||||
/** uL and uR are the x-coordinates of the point in the left and right image frames, respectively.
|
||||
v is the y-coordinate, which should be the same in both frames under the
|
||||
stereo constraint. */
|
||||
StereoPoint2(double uL, double uR, double v) :
|
||||
uL_(uL), uR_(uR), v_(v) {
|
||||
}
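// Usage sketch (illustrative values, based on the documented constructor above):
//   StereoPoint2 sp(320.0, 300.0, 240.0);  // uL = 320, uR = 300 (20 px disparity), v = 240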
|
||||
|
|
|
@ -109,6 +109,7 @@ class Ordering {
|
|||
FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph,
|
||||
gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}>
|
||||
static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph);
|
||||
static gtsam::Ordering Colamd(const gtsam::VariableIndex& variableIndex);
|
||||
|
||||
template <
|
||||
FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph,
|
||||
|
|
|
@ -25,7 +25,6 @@ namespace gtsam {
|
|||
#include <gtsam/geometry/Unit3.h>
|
||||
#include <gtsam/navigation/ImuBias.h>
|
||||
#include <gtsam/navigation/NavState.h>
|
||||
#include <gtsam/basis/ParameterMatrix.h>
|
||||
|
||||
#include <gtsam/nonlinear/GraphvizFormatting.h>
|
||||
class GraphvizFormatting : gtsam::DotWriter {
|
||||
|
|
|
@ -25,7 +25,6 @@ namespace gtsam {
|
|||
#include <gtsam/geometry/Unit3.h>
|
||||
#include <gtsam/navigation/ImuBias.h>
|
||||
#include <gtsam/navigation/NavState.h>
|
||||
#include <gtsam/basis/ParameterMatrix.h>
|
||||
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
|
||||
|
@ -96,21 +95,6 @@ class Values {
|
|||
void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
|
||||
void insert(size_t j, const gtsam::NavState& nav_state);
|
||||
void insert(size_t j, double c);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<1>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<2>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<3>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<4>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<5>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<6>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<7>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<8>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<9>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<10>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<11>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<12>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<13>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<14>& X);
|
||||
void insert(size_t j, const gtsam::ParameterMatrix<15>& X);
|
||||
|
||||
template <T = {gtsam::Point2, gtsam::Point3}>
|
||||
void insert(size_t j, const T& val);
|
||||
|
@ -144,21 +128,6 @@ class Values {
|
|||
void update(size_t j, Vector vector);
|
||||
void update(size_t j, Matrix matrix);
|
||||
void update(size_t j, double c);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<1>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<2>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<3>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<4>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<5>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<6>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<7>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<8>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<9>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<10>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<11>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<12>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<13>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<14>& X);
|
||||
void update(size_t j, const gtsam::ParameterMatrix<15>& X);
|
||||
|
||||
void insert_or_assign(size_t j, const gtsam::Point2& point2);
|
||||
void insert_or_assign(size_t j, const gtsam::Point3& point3);
|
||||
|
@ -199,21 +168,6 @@ class Values {
|
|||
void insert_or_assign(size_t j, Vector vector);
|
||||
void insert_or_assign(size_t j, Matrix matrix);
|
||||
void insert_or_assign(size_t j, double c);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<1>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<2>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<3>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<4>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<5>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<6>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<7>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<8>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<9>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<10>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<11>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<12>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<13>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<14>& X);
|
||||
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<15>& X);
|
||||
|
||||
template <T = {gtsam::Point2,
|
||||
gtsam::Point3,
|
||||
|
@ -243,22 +197,7 @@ class Values {
|
|||
gtsam::NavState,
|
||||
Vector,
|
||||
Matrix,
|
||||
double,
|
||||
gtsam::ParameterMatrix<1>,
|
||||
gtsam::ParameterMatrix<2>,
|
||||
gtsam::ParameterMatrix<3>,
|
||||
gtsam::ParameterMatrix<4>,
|
||||
gtsam::ParameterMatrix<5>,
|
||||
gtsam::ParameterMatrix<6>,
|
||||
gtsam::ParameterMatrix<7>,
|
||||
gtsam::ParameterMatrix<8>,
|
||||
gtsam::ParameterMatrix<9>,
|
||||
gtsam::ParameterMatrix<10>,
|
||||
gtsam::ParameterMatrix<11>,
|
||||
gtsam::ParameterMatrix<12>,
|
||||
gtsam::ParameterMatrix<13>,
|
||||
gtsam::ParameterMatrix<14>,
|
||||
gtsam::ParameterMatrix<15>}>
|
||||
double}>
|
||||
T at(size_t j);
|
||||
};
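With the ParameterMatrix overloads removed, only the remaining wrapped types round-trip through Values. A minimal Python sketch of the surviving interface (the atPose2/atDouble accessor names are assumed from the usual wrapper conventions, not shown in this hunk):

import gtsam

values = gtsam.Values()
values.insert(0, gtsam.Pose2(1.0, 2.0, 0.3))  # geometry overloads are still wrapped
values.insert(1, 0.5)                         # plain double overload
pose = values.atPose2(0)                      # typed accessor generated from the `at` template
c = values.atDouble(1)                        # assumed accessor name for the double overload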
|
||||
|
||||
|
|
|
@ -4,9 +4,49 @@
|
|||
|
||||
import sys
|
||||
|
||||
from gtsam.utils import findExampleDataFile
|
||||
|
||||
from gtsam import gtsam, utils
|
||||
from gtsam.gtsam import *
|
||||
from gtsam.utils import findExampleDataFile
|
||||
|
||||
#### Typedefs to allow for backwards compatibility
|
||||
#TODO(Varun) deprecate in future release
|
||||
# gtsam
|
||||
KeyVector = list
|
||||
# base
|
||||
IndexPairSetMap = dict
|
||||
IndexPairVector = list
|
||||
# geometry
|
||||
Point2Vector = list
|
||||
Pose3Vector = list
|
||||
Rot3Vector = list
|
||||
Point2Pairs = list
|
||||
Point3Pairs = list
|
||||
Pose2Pairs = list
|
||||
Pose3Pairs = list
|
||||
# sfm
|
||||
BinaryMeasurementsPoint3 = list
|
||||
BinaryMeasurementsUnit3 = list
|
||||
BinaryMeasurementsRot3 = list
|
||||
KeyPairDoubleMap = dict
|
||||
SfmTrack2dVector = list
|
||||
SfmTracks = list
|
||||
SfmCameras = list
|
||||
SfmMeasurementVector = list
|
||||
MatchIndicesMap = dict
|
||||
KeypointsVector = list
|
||||
# slam
|
||||
BetweenFactorPose3s = list
|
||||
BetweenFactorPose2s = list
|
||||
|
||||
|
||||
class FixedLagSmootherKeyTimestampMap(dict):
|
||||
"""Class to provide backwards compatibility"""
|
||||
def insert(self, key_value):
|
||||
self[key_value[0]] = key_value[1]
|
||||
|
||||
|
||||
#### End typedefs
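# Usage sketch (illustrative, not part of this module): the aliases above mean the
# old wrapped-container spellings keep working while accepting plain Python objects.
#   keys = gtsam.KeyVector([1, 2, 3])        # KeyVector is just `list`, so this is [1, 2, 3]
#   stamps = gtsam.FixedLagSmootherKeyTimestampMap()
#   stamps.insert((0, 0.0))                  # legacy insert((key, time)) API on top of a dict
#   assert stamps[0] == 0.0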
|
||||
|
||||
|
||||
def _init():
|
||||
|
|
|
@ -11,11 +11,12 @@ Refactored: Roderick Koehle
|
|||
"""
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam.symbol_shorthand import K, L, P
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
|
||||
|
||||
def ulp(ftype=np.float64):
|
||||
"""
|
||||
|
@ -166,7 +167,7 @@ class TestCal3Fisheye(GtsamTestCase):
|
|||
pose = gtsam.Pose3()
|
||||
camera = gtsam.Cal3Fisheye()
|
||||
state = gtsam.Values()
|
||||
camera_key, pose_key, landmark_key = K(0), P(0), L(0)
|
||||
pose_key, landmark_key = P(0), L(0)
|
||||
state.insert_point3(landmark_key, obj_point)
|
||||
state.insert_pose3(pose_key, pose)
|
||||
g = gtsam.NonlinearFactorGraph()
|
||||
|
|
|
@ -10,11 +10,12 @@ Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
|||
"""
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam.symbol_shorthand import K, L, P
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
|
||||
|
||||
class TestCal3Unified(GtsamTestCase):
|
||||
|
||||
|
|
|
@ -14,7 +14,6 @@ import numpy as np
|
|||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
import gtsam_unstable
|
||||
|
||||
|
||||
class TestFixedLagSmootherExample(GtsamTestCase):
|
||||
|
|
|
@ -14,11 +14,12 @@ from __future__ import print_function
|
|||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam.symbol_shorthand import X
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
|
||||
|
||||
def create_graph():
|
||||
"""Create a basic linear factor graph for testing"""
|
||||
|
@ -40,6 +41,7 @@ def create_graph():
|
|||
|
||||
class TestGaussianFactorGraph(GtsamTestCase):
|
||||
"""Tests for Gaussian Factor Graphs."""
|
||||
|
||||
def test_fg(self):
|
||||
"""Test solving a linear factor graph"""
|
||||
graph, X = create_graph()
|
||||
|
@ -98,12 +100,11 @@ class TestGaussianFactorGraph(GtsamTestCase):
|
|||
bn = gfg.eliminateSequential(ordering)
|
||||
self.assertEqual(bn.size(), 3)
|
||||
|
||||
keyVector = []
|
||||
keyVector.append(keys[2])
|
||||
#TODO(Varun) Below code causes segfault in Debug config
|
||||
# ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(gfg, keyVector)
|
||||
# bn = gfg.eliminateSequential(ordering)
|
||||
# self.assertEqual(bn.size(), 3)
|
||||
keyVector = [keys[2]]
|
||||
ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(
|
||||
gfg, keyVector)
|
||||
bn = gfg.eliminateSequential(ordering)
|
||||
self.assertEqual(bn.size(), 3)
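# Minimal sketch of the re-enabled pattern above (assumes `gfg` and `keys` as built
# in this test): KeyVector is now a plain Python list, so constrained COLAMD can be
# driven directly with list syntax.
#   constrain_last = [keys[2]]
#   ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(gfg, constrain_last)
#   bn = gfg.eliminateSequential(ordering)   # keys[2] is eliminated last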
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -13,15 +13,15 @@ Author: Frank Dellaert
|
|||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam import Rot3
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
from gtsam import Rot3
|
||||
|
||||
KEY = 0
|
||||
MODEL = gtsam.noiseModel.Unit.Create(3)
|
||||
|
||||
|
||||
# Rot3 version
|
||||
R = Rot3.Expmap(np.array([0.1, 0, 0]))
|
||||
|
||||
|
@ -29,8 +29,10 @@ R = Rot3.Expmap(np.array([0.1, 0, 0]))
|
|||
class TestKarcherMean(GtsamTestCase):
|
||||
|
||||
def test_find(self):
|
||||
# Check that optimizing for Karcher mean (which minimizes Between distance)
|
||||
# gets correct result.
|
||||
"""
|
||||
Check that optimizing for Karcher mean (which minimizes Between distance)
|
||||
gets correct result.
|
||||
"""
|
||||
rotations = [R, R.inverse()]
|
||||
expected = Rot3()
|
||||
actual = gtsam.FindKarcherMean(rotations)
|
||||
|
@ -69,8 +71,7 @@ class TestKarcherMean(GtsamTestCase):
|
|||
result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
|
||||
actual = gtsam.FindKarcherMean([result.atRot3(1), result.atRot3(2)])
|
||||
self.gtsamAssertEquals(expected, actual)
|
||||
self.gtsamAssertEquals(
|
||||
R12, result.atRot3(1).between(result.atRot3(2)))
|
||||
self.gtsamAssertEquals(R12, result.atRot3(1).between(result.atRot3(2)))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -12,12 +12,14 @@ import math
|
|||
import unittest
|
||||
|
||||
import numpy as np
|
||||
from gtsam import Point2, Pose2
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
from gtsam import Point2, Point2Pairs, Pose2
|
||||
|
||||
|
||||
class TestPose2(GtsamTestCase):
|
||||
"""Test selected Pose2 methods."""
|
||||
|
||||
def test_adjoint(self) -> None:
|
||||
"""Test adjoint method."""
|
||||
xi = np.array([1, 2, 3])
|
||||
|
|
|
@ -12,11 +12,12 @@ Author: Frank Dellaert, Duy Nguyen Ta
|
|||
import math
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam import Point3, Pose3, Rot3
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
from gtsam import Point3, Pose3, Rot3
|
||||
|
||||
|
||||
def numerical_derivative_pose(pose, method, delta=1e-5):
|
||||
jacobian = np.zeros((6, 6))
|
||||
|
|
|
@ -12,12 +12,13 @@ Author: Frank Dellaert
|
|||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
from gtsam import (BetweenFactorPose2, LevenbergMarquardtParams, Pose2, Rot2,
|
||||
ShonanAveraging2, ShonanAveraging3,
|
||||
ShonanAveragingParameters2, ShonanAveragingParameters3)
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
DEFAULT_PARAMS = ShonanAveragingParameters3(
|
||||
gtsam.LevenbergMarquardtParams.CeresDefaults()
|
||||
|
@ -140,7 +141,6 @@ class TestShonanAveraging(GtsamTestCase):
|
|||
result, _lambdaMin = shonan.run(initial, 3, 3)
|
||||
self.assertAlmostEqual(0.0015, shonan.cost(result), places=3)
|
||||
|
||||
|
||||
def test_constructorBetweenFactorPose2s(self) -> None:
|
||||
"""Check if ShonanAveraging2 constructor works when not initialized from g2o file.
|
||||
|
||||
|
|
|
@ -12,9 +12,10 @@ Author: John Lambert
|
|||
import unittest
|
||||
|
||||
import numpy as np
|
||||
from gtsam import Pose2, Rot2, Similarity2
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
from gtsam import Pose2, Rot2, Similarity2
|
||||
|
||||
|
||||
class TestSim2(GtsamTestCase):
|
||||
"""Test selected Sim2 methods."""
|
||||
|
@ -55,7 +56,7 @@ class TestSim2(GtsamTestCase):
|
|||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses_along_straight_line_gauge(self):
|
||||
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
|
||||
"""Test if Pose2 Align method can account for gauge ambiguity.
|
||||
|
||||
Scenario:
|
||||
3 object poses
|
||||
|
@ -90,7 +91,7 @@ class TestSim2(GtsamTestCase):
|
|||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses_scaled_squares(self):
|
||||
"""Test if Align Pose2Pairs method can account for gauge ambiguity.
|
||||
"""Test if Align method can account for gauge ambiguity.
|
||||
|
||||
Make sure a big and small square can be aligned.
|
||||
The u's represent a big square (10x10), and the v's represent a small square (4x4).
|
||||
|
|
|
@ -12,17 +12,18 @@ Author: John Lambert
|
|||
import unittest
|
||||
from typing import List, Optional
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam import Point3, Pose3, Rot3, Similarity3
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
from gtsam import Point3, Pose3, Rot3, Similarity3
|
||||
|
||||
|
||||
class TestSim3(GtsamTestCase):
|
||||
"""Test selected Sim3 methods."""
|
||||
|
||||
def test_align_poses_along_straight_line(self):
|
||||
"""Test Align Pose3Pairs method.
|
||||
"""Test Pose3 Align method.
|
||||
|
||||
Scenario:
|
||||
3 object poses
|
||||
|
@ -57,7 +58,7 @@ class TestSim3(GtsamTestCase):
|
|||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses_along_straight_line_gauge(self):
|
||||
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
|
||||
"""Test if Pose3 Align method can account for gauge ambiguity.
|
||||
|
||||
Scenario:
|
||||
3 object poses
|
||||
|
@ -92,7 +93,7 @@ class TestSim3(GtsamTestCase):
|
|||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses_scaled_squares(self):
|
||||
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
|
||||
"""Test if Pose3 Align method can account for gauge ambiguity.
|
||||
|
||||
Make sure a big and small square can be aligned.
|
||||
The u's represent a big square (10x10), and the v's represent a small square (4x4).
|
||||
|
|
|
@ -12,13 +12,14 @@ Authors: Frank Dellaert & Fan Jiang (Python) & Sushmita Warrier & John Lambert
|
|||
import unittest
|
||||
from typing import Iterable, List, Optional, Tuple, Union
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2,
|
||||
CameraSetCal3Bundler, PinholeCameraCal3_S2,
|
||||
PinholeCameraCal3Bundler, Point2, Point3, Pose3, Rot3,
|
||||
TriangulationParameters, TriangulationResult)
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
|
||||
|
||||
|
|
|
@ -0,0 +1,897 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests to ensure backwards compatibility of the Python wrapper.
|
||||
Author: Varun Agrawal
|
||||
"""
|
||||
import unittest
|
||||
from typing import Iterable, List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
from gtsam.gtsfm import Keypoints
|
||||
from gtsam.symbol_shorthand import X
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
import gtsam
|
||||
from gtsam import (BetweenFactorPose2, Cal3_S2, Cal3Bundler, CameraSetCal3_S2,
|
||||
CameraSetCal3Bundler, IndexPair, LevenbergMarquardtParams,
|
||||
PinholeCameraCal3_S2, PinholeCameraCal3Bundler, Point2,
|
||||
Point2Pairs, Point3, Pose2, Pose2Pairs, Pose3, Rot2, Rot3,
|
||||
SfmTrack2d, ShonanAveraging2, ShonanAveragingParameters2,
|
||||
Similarity2, Similarity3, TriangulationParameters,
|
||||
TriangulationResult)
|
||||
|
||||
UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
|
||||
|
||||
|
||||
class TestBackwardsCompatibility(GtsamTestCase):
|
||||
"""Tests for the backwards compatibility for the Python wrapper."""
|
||||
|
||||
def setUp(self):
|
||||
"""Setup test fixtures"""
|
||||
p1 = [-1.0, 0.0, -1.0]
|
||||
p2 = [1.0, 0.0, -1.0]
|
||||
q1 = Rot3(1.0, 0.0, 0.0, 0.0)
|
||||
q2 = Rot3(1.0, 0.0, 0.0, 0.0)
|
||||
pose1 = Pose3(q1, p1)
|
||||
pose2 = Pose3(q2, p2)
|
||||
camera1 = gtsam.PinholeCameraCal3Fisheye(pose1)
|
||||
camera2 = gtsam.PinholeCameraCal3Fisheye(pose2)
|
||||
self.origin = np.array([0.0, 0.0, 0.0])
|
||||
self.poses = gtsam.Pose3Vector([pose1, pose2])
|
||||
|
||||
self.fisheye_cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2])
|
||||
self.fisheye_measurements = gtsam.Point2Vector(
|
||||
[k.project(self.origin) for k in self.fisheye_cameras])
|
||||
|
||||
fx, fy, s, u0, v0 = 2, 2, 0, 0, 0
|
||||
k1, k2, p1, p2 = 0, 0, 0, 0
|
||||
xi = 1
|
||||
self.stereographic = gtsam.Cal3Unified(fx, fy, s, u0, v0, k1, k2, p1,
|
||||
p2, xi)
|
||||
camera1 = gtsam.PinholeCameraCal3Unified(pose1, self.stereographic)
|
||||
camera2 = gtsam.PinholeCameraCal3Unified(pose2, self.stereographic)
|
||||
self.unified_cameras = gtsam.CameraSetCal3Unified([camera1, camera2])
|
||||
self.unified_measurements = gtsam.Point2Vector(
|
||||
[k.project(self.origin) for k in self.unified_cameras])
|
||||
|
||||
## Set up two camera poses
|
||||
# Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
pose1 = Pose3(UPRIGHT, Point3(0, 0, 1))
|
||||
|
||||
# create second camera 1 meter to the right of first camera
|
||||
pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))
|
||||
# twoPoses
|
||||
self.triangulation_poses = gtsam.Pose3Vector()
|
||||
self.triangulation_poses.append(pose1)
|
||||
self.triangulation_poses.append(pose2)
|
||||
|
||||
# landmark ~5 meters in front of camera
|
||||
self.landmark = Point3(5, 0.5, 1.2)
|
||||
|
||||
def test_Cal3Fisheye_triangulation_rectify(self):
|
||||
"""
|
||||
Estimate spatial point from image measurements using
|
||||
rectification from a Cal3Fisheye camera model.
|
||||
"""
|
||||
rectified = gtsam.Point2Vector([
|
||||
k.calibration().calibrate(pt)
|
||||
for k, pt in zip(self.fisheye_cameras, self.fisheye_measurements)
|
||||
])
|
||||
shared_cal = gtsam.Cal3_S2()
|
||||
triangulated = gtsam.triangulatePoint3(self.poses,
|
||||
shared_cal,
|
||||
rectified,
|
||||
rank_tol=1e-9,
|
||||
optimize=False)
|
||||
self.gtsamAssertEquals(triangulated, self.origin)
|
||||
|
||||
def test_Cal3Unified_triangulation_rectify(self):
|
||||
"""
|
||||
Estimate spatial point from image measurements using
|
||||
rectification from a Cal3Unified camera model.
|
||||
"""
|
||||
rectified = gtsam.Point2Vector([
|
||||
k.calibration().calibrate(pt)
|
||||
for k, pt in zip(self.unified_cameras, self.unified_measurements)
|
||||
])
|
||||
shared_cal = gtsam.Cal3_S2()
|
||||
triangulated = gtsam.triangulatePoint3(self.poses,
|
||||
shared_cal,
|
||||
rectified,
|
||||
rank_tol=1e-9,
|
||||
optimize=False)
|
||||
self.gtsamAssertEquals(triangulated, self.origin)
|
||||
|
||||
def test_track_generation(self) -> None:
|
||||
"""Ensures that DSF generates three tracks from measurements
|
||||
in 3 images (H=200,W=400)."""
|
||||
kps_i0 = Keypoints(np.array([[10.0, 20], [30, 40]]))
|
||||
kps_i1 = Keypoints(np.array([[50.0, 60], [70, 80], [90, 100]]))
|
||||
kps_i2 = Keypoints(np.array([[110.0, 120], [130, 140]]))
|
||||
|
||||
keypoints_list = gtsam.KeypointsVector()
|
||||
keypoints_list.append(kps_i0)
|
||||
keypoints_list.append(kps_i1)
|
||||
keypoints_list.append(kps_i2)
|
||||
|
||||
# For each image pair (i1,i2), we provide a (K,2) matrix
|
||||
# of corresponding image indices (k1,k2).
|
||||
matches_dict = gtsam.MatchIndicesMap()
|
||||
matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]])
|
||||
matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]])
|
||||
|
||||
tracks = gtsam.gtsfm.tracksFromPairwiseMatches(
|
||||
matches_dict,
|
||||
keypoints_list,
|
||||
verbose=False,
|
||||
)
|
||||
assert len(tracks) == 3
|
||||
|
||||
# Verify track 0.
|
||||
track0 = tracks[0]
|
||||
assert track0.numberMeasurements() == 2
|
||||
np.testing.assert_allclose(track0.measurements[0][1], Point2(10, 20))
|
||||
np.testing.assert_allclose(track0.measurements[1][1], Point2(50, 60))
|
||||
assert track0.measurements[0][0] == 0
|
||||
assert track0.measurements[1][0] == 1
|
||||
np.testing.assert_allclose(
|
||||
track0.measurementMatrix(),
|
||||
[
|
||||
[10, 20],
|
||||
[50, 60],
|
||||
],
|
||||
)
|
||||
np.testing.assert_allclose(track0.indexVector(), [0, 1])
|
||||
|
||||
# Verify track 1.
|
||||
track1 = tracks[1]
|
||||
np.testing.assert_allclose(
|
||||
track1.measurementMatrix(),
|
||||
[
|
||||
[30, 40],
|
||||
[70, 80],
|
||||
[130, 140],
|
||||
],
|
||||
)
|
||||
np.testing.assert_allclose(track1.indexVector(), [0, 1, 2])
|
||||
|
||||
# Verify track 2.
|
||||
track2 = tracks[2]
|
||||
np.testing.assert_allclose(
|
||||
track2.measurementMatrix(),
|
||||
[
|
||||
[90, 100],
|
||||
[110, 120],
|
||||
],
|
||||
)
|
||||
np.testing.assert_allclose(track2.indexVector(), [1, 2])
|
||||
|
||||
def test_sfm_track_2d_constructor(self) -> None:
|
||||
"""Test construction of 2D SfM track."""
|
||||
measurements = gtsam.SfmMeasurementVector()
|
||||
measurements.append((0, Point2(10, 20)))
|
||||
track = SfmTrack2d(measurements=measurements)
|
||||
track.measurement(0)
|
||||
assert track.numberMeasurements() == 1
|
||||
|
||||
def test_FixedLagSmootherExample(self):
|
||||
'''
|
||||
Simple test that checks for equality between C++ example
|
||||
file and the Python implementation. See
|
||||
gtsam_unstable/examples/FixedLagSmootherExample.cpp
|
||||
'''
|
||||
# Define a batch fixed lag smoother, which uses
|
||||
# Levenberg-Marquardt to perform the nonlinear optimization
|
||||
lag = 2.0
|
||||
smoother_batch = gtsam.BatchFixedLagSmoother(lag)
|
||||
|
||||
# Create containers to store the factors and linearization points
|
||||
# that will be sent to the smoothers
|
||||
new_factors = gtsam.NonlinearFactorGraph()
|
||||
new_values = gtsam.Values()
|
||||
new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap()
|
||||
|
||||
# Create a prior on the first pose, placing it at the origin
|
||||
prior_mean = Pose2(0, 0, 0)
|
||||
prior_noise = gtsam.noiseModel.Diagonal.Sigmas(
|
||||
np.array([0.3, 0.3, 0.1]))
|
||||
X1 = 0
|
||||
new_factors.push_back(
|
||||
gtsam.PriorFactorPose2(X1, prior_mean, prior_noise))
|
||||
new_values.insert(X1, prior_mean)
|
||||
new_timestamps.insert((X1, 0.0))
|
||||
|
||||
delta_time = 0.25
|
||||
time = 0.25
|
||||
|
||||
i = 0
|
||||
|
||||
ground_truth = [
|
||||
Pose2(0.995821, 0.0231012, 0.0300001),
|
||||
Pose2(1.49284, 0.0457247, 0.045),
|
||||
Pose2(1.98981, 0.0758879, 0.06),
|
||||
Pose2(2.48627, 0.113502, 0.075),
|
||||
Pose2(2.98211, 0.158558, 0.09),
|
||||
Pose2(3.47722, 0.211047, 0.105),
|
||||
Pose2(3.97149, 0.270956, 0.12),
|
||||
Pose2(4.4648, 0.338272, 0.135),
|
||||
Pose2(4.95705, 0.41298, 0.15),
|
||||
Pose2(5.44812, 0.495063, 0.165),
|
||||
Pose2(5.9379, 0.584503, 0.18),
|
||||
]
|
||||
|
||||
# Iterates from 0.25s to 3.0s, adding 0.25s each loop
|
||||
# In each iteration, the agent moves at a constant speed
|
||||
# and its two odometers measure the change. The smoothed
|
||||
# result is then compared to the ground truth
|
||||
while time <= 3.0:
|
||||
previous_key = int(1000 * (time - delta_time))
|
||||
current_key = int(1000 * time)
|
||||
|
||||
# assign current key to the current timestamp
|
||||
new_timestamps.insert((current_key, time))
|
||||
|
||||
# Add a guess for this pose to the new values
|
||||
# Assume that the robot moves at 2 m/s. Position is time[s] *
|
||||
# 2[m/s]
|
||||
current_pose = Pose2(time * 2, 0, 0)
|
||||
new_values.insert(current_key, current_pose)
|
||||
|
||||
# Add odometry factors from two different sources with different
|
||||
# error stats
|
||||
odometry_measurement_1 = Pose2(0.61, -0.08, 0.02)
|
||||
odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas(
|
||||
np.array([0.1, 0.1, 0.05]))
|
||||
new_factors.push_back(
|
||||
gtsam.BetweenFactorPose2(previous_key, current_key,
|
||||
odometry_measurement_1,
|
||||
odometry_noise_1))
|
||||
|
||||
odometry_measurement_2 = Pose2(0.47, 0.03, 0.01)
|
||||
odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas(
|
||||
np.array([0.05, 0.05, 0.05]))
|
||||
new_factors.push_back(
|
||||
gtsam.BetweenFactorPose2(previous_key, current_key,
|
||||
odometry_measurement_2,
|
||||
odometry_noise_2))
|
||||
|
||||
# Update the smoothers with the new factors. In this case,
|
||||
# one iteration must pass for Levenberg-Marquardt to accurately
|
||||
# estimate
|
||||
if time >= 0.50:
|
||||
smoother_batch.update(new_factors, new_values, new_timestamps)
|
||||
|
||||
estimate = smoother_batch.calculateEstimatePose2(current_key)
|
||||
self.assertTrue(estimate.equals(ground_truth[i], 1e-4))
|
||||
i += 1
|
||||
|
||||
new_timestamps.clear()
|
||||
new_values.clear()
|
||||
new_factors.resize(0)
|
||||
|
||||
time += delta_time
|
||||
|
||||
def test_ordering(self):
|
||||
"""Test ordering"""
|
||||
gfg = gtsam.GaussianFactorGraph()
|
||||
|
||||
x0 = X(0)
|
||||
x1 = X(1)
|
||||
x2 = X(2)
|
||||
|
||||
BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1))
|
||||
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1))
|
||||
|
||||
gfg.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE)
|
||||
gfg.add(x2, np.eye(1), x1, -np.eye(1), 2 * np.ones(1), BETWEEN_NOISE)
|
||||
gfg.add(x0, np.eye(1), np.zeros(1), PRIOR_NOISE)
|
||||
|
||||
keys = (x0, x1, x2)
|
||||
ordering = gtsam.Ordering()
|
||||
for key in keys[::-1]:
|
||||
ordering.push_back(key)
|
||||
|
||||
bn = gfg.eliminateSequential(ordering)
|
||||
self.assertEqual(bn.size(), 3)
|
||||
|
||||
keyVector = gtsam.KeyVector()
|
||||
keyVector.append(keys[2])
|
||||
ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(
|
||||
gfg, keyVector)
|
||||
bn = gfg.eliminateSequential(ordering)
|
||||
self.assertEqual(bn.size(), 3)
|
||||
|
||||
def test_find(self):
|
||||
"""
|
||||
Check that optimizing for Karcher mean (which minimizes Between distance)
|
||||
gets correct result.
|
||||
"""
|
||||
R = Rot3.Expmap(np.array([0.1, 0, 0]))
|
||||
|
||||
rotations = gtsam.Rot3Vector([R, R.inverse()])
|
||||
expected = Rot3()
|
||||
actual = gtsam.FindKarcherMean(rotations)
|
||||
self.gtsamAssertEquals(expected, actual)
|
||||
|
||||
def test_find_karcher_mean_identity(self):
|
||||
"""Averaging 3 identity rotations should yield the identity."""
|
||||
a1Rb1 = Rot3()
|
||||
a2Rb2 = Rot3()
|
||||
a3Rb3 = Rot3()
|
||||
|
||||
aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3])
|
||||
aRb_expected = Rot3()
|
||||
|
||||
aRb = gtsam.FindKarcherMean(aRb_list)
|
||||
self.gtsamAssertEquals(aRb, aRb_expected)
|
||||
|
||||
def test_factor(self):
|
||||
"""Check that the InnerConstraint factor leaves the mean unchanged."""
|
||||
# Make a graph with two variables, one between, and one InnerConstraint
|
||||
# The optimal result should satisfy the between, while moving the other
|
||||
# variable to make the mean the same as before.
|
||||
# Mean of R and R' is identity. Let's make a BetweenFactor making R21 =
|
||||
# R*R*R, i.e. geodesic length is 3 rather than 2.
|
||||
R = Rot3.Expmap(np.array([0.1, 0, 0]))
|
||||
MODEL = gtsam.noiseModel.Unit.Create(3)
|
||||
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
R12 = R.compose(R.compose(R))
|
||||
graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL))
|
||||
keys = gtsam.KeyVector()
|
||||
keys.append(1)
|
||||
keys.append(2)
|
||||
graph.add(gtsam.KarcherMeanFactorRot3(keys))
|
||||
|
||||
initial = gtsam.Values()
|
||||
initial.insert(1, R.inverse())
|
||||
initial.insert(2, R)
|
||||
expected = Rot3()
|
||||
|
||||
result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
|
||||
actual = gtsam.FindKarcherMean(
|
||||
gtsam.Rot3Vector([result.atRot3(1),
|
||||
result.atRot3(2)]))
|
||||
self.gtsamAssertEquals(expected, actual)
|
||||
self.gtsamAssertEquals(R12, result.atRot3(1).between(result.atRot3(2)))
|
||||
|
||||
def test_align(self) -> None:
|
||||
"""Ensure estimation of the Pose2 element to align two 2d point clouds succeeds.
|
||||
|
||||
Two point clouds represent horseshoe-shapes of the same size, just rotated and translated:
|
||||
|
||||
| X---X
|
||||
| |
|
||||
| X---X
|
||||
------------------
|
||||
|
|
||||
|
|
||||
O | O
|
||||
| | |
|
||||
O---O
|
||||
"""
|
||||
pts_a = [
|
||||
Point2(1, -3),
|
||||
Point2(1, -5),
|
||||
Point2(-1, -5),
|
||||
Point2(-1, -3),
|
||||
]
|
||||
pts_b = [
|
||||
Point2(3, 1),
|
||||
Point2(1, 1),
|
||||
Point2(1, 3),
|
||||
Point2(3, 3),
|
||||
]
|
||||
|
||||
ab_pairs = Point2Pairs(list(zip(pts_a, pts_b)))
|
||||
aTb = Pose2.Align(ab_pairs)
|
||||
self.assertIsNotNone(aTb)
|
||||
|
||||
for pt_a, pt_b in zip(pts_a, pts_b):
|
||||
pt_a_ = aTb.transformFrom(pt_b)
|
||||
np.testing.assert_allclose(pt_a, pt_a_)
|
||||
|
||||
# Matrix version
|
||||
A = np.array(pts_a).T
|
||||
B = np.array(pts_b).T
|
||||
aTb = Pose2.Align(A, B)
|
||||
self.assertIsNotNone(aTb)
|
||||
|
||||
for pt_a, pt_b in zip(pts_a, pts_b):
|
||||
pt_a_ = aTb.transformFrom(pt_b)
|
||||
np.testing.assert_allclose(pt_a, pt_a_)
|
||||
|
||||
def test_align_squares(self):
|
||||
"""Test if Align method can align 2 squares."""
|
||||
square = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]],
|
||||
float).T
|
||||
sTt = Pose3(Rot3.Rodrigues(0, 0, -np.pi), gtsam.Point3(2, 4, 0))
|
||||
transformed = sTt.transformTo(square)
|
||||
|
||||
st_pairs = gtsam.Point3Pairs()
|
||||
for j in range(4):
|
||||
st_pairs.append((square[:, j], transformed[:, j]))
|
||||
|
||||
# Recover the transformation sTt
|
||||
estimated_sTt = Pose3.Align(st_pairs)
|
||||
self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10)
|
||||
|
||||
# Matrix version
|
||||
estimated_sTt = Pose3.Align(square, transformed)
|
||||
self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10)
|
||||
|
||||
def test_constructorBetweenFactorPose2s(self) -> None:
|
||||
"""Check if ShonanAveraging2 constructor works when not initialized from g2o file.
|
||||
|
||||
GT pose graph:
|
||||
|
||||
| cam 1 = (0,4)
|
||||
--o
|
||||
| .
|
||||
. .
|
||||
. .
|
||||
| |
|
||||
o-- ... o--
|
||||
cam 0 cam 2 = (4,0)
|
||||
(0,0)
|
||||
"""
|
||||
num_images = 3
|
||||
|
||||
wTi_list = [
|
||||
Pose2(Rot2.fromDegrees(0), np.array([0, 0])),
|
||||
Pose2(Rot2.fromDegrees(90), np.array([0, 4])),
|
||||
Pose2(Rot2.fromDegrees(0), np.array([4, 0])),
|
||||
]
|
||||
|
||||
edges = [(0, 1), (1, 2), (0, 2)]
|
||||
i2Ri1_dict = {(i1, i2):
|
||||
wTi_list[i2].inverse().compose(wTi_list[i1]).rotation()
|
||||
for (i1, i2) in edges}
|
||||
|
||||
lm_params = LevenbergMarquardtParams.CeresDefaults()
|
||||
shonan_params = ShonanAveragingParameters2(lm_params)
|
||||
shonan_params.setUseHuber(False)
|
||||
shonan_params.setCertifyOptimality(True)
|
||||
|
||||
noise_model = gtsam.noiseModel.Unit.Create(3)
|
||||
between_factors = gtsam.BetweenFactorPose2s()
|
||||
for (i1, i2), i2Ri1 in i2Ri1_dict.items():
|
||||
i2Ti1 = Pose2(i2Ri1, np.zeros(2))
|
||||
between_factors.append(
|
||||
BetweenFactorPose2(i2, i1, i2Ti1, noise_model))
|
||||
|
||||
obj = ShonanAveraging2(between_factors, shonan_params)
|
||||
initial = obj.initializeRandomly()
|
||||
result_values, _ = obj.run(initial, min_p=2, max_p=100)
|
||||
|
||||
wRi_list = [result_values.atRot2(i) for i in range(num_images)]
|
||||
thetas_deg = np.array([wRi.degrees() for wRi in wRi_list])
|
||||
|
||||
# map all angles to [0,360)
|
||||
thetas_deg = thetas_deg % 360
|
||||
thetas_deg -= thetas_deg[0]
|
||||
|
||||
expected_thetas_deg = np.array([0.0, 90.0, 0.0])
|
||||
np.testing.assert_allclose(thetas_deg, expected_thetas_deg, atol=0.1)
|
||||
|
||||
def test_align_poses2_along_straight_line(self) -> None:
|
||||
"""Test Align of list of Pose2Pair.
|
||||
|
||||
Scenario:
|
||||
3 object poses
|
||||
same scale (no gauge ambiguity)
|
||||
world frame has poses rotated about 180 degrees.
|
||||
world and egovehicle frame translated by 15 meters w.r.t. each other
|
||||
"""
|
||||
R180 = Rot2.fromDegrees(180)
|
||||
|
||||
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
|
||||
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
|
||||
eTo0 = Pose2(Rot2(), np.array([5, 0]))
|
||||
eTo1 = Pose2(Rot2(), np.array([10, 0]))
|
||||
eTo2 = Pose2(Rot2(), np.array([15, 0]))
|
||||
|
||||
eToi_list = [eTo0, eTo1, eTo2]
|
||||
|
||||
# Create destination poses
|
||||
# (same three objects, but instead living in the world "w" frame)
|
||||
wTo0 = Pose2(R180, np.array([-10, 0]))
|
||||
wTo1 = Pose2(R180, np.array([-5, 0]))
|
||||
wTo2 = Pose2(R180, np.array([0, 0]))
|
||||
|
||||
wToi_list = [wTo0, wTo1, wTo2]
|
||||
|
||||
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
|
||||
|
||||
# Recover the transformation wSe (i.e. world_S_egovehicle)
|
||||
wSe = Similarity2.Align(we_pairs)
|
||||
|
||||
for wToi, eToi in zip(wToi_list, eToi_list):
|
||||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses2_along_straight_line_gauge(self):
|
||||
"""Test if Align Pose2Pairs method can account for gauge ambiguity.
|
||||
|
||||
Scenario:
|
||||
3 object poses
|
||||
with gauge ambiguity (2x scale)
|
||||
world frame has poses rotated by 90 degrees.
|
||||
world and egovehicle frame translated by 11 meters w.r.t. each other
|
||||
"""
|
||||
R90 = Rot2.fromDegrees(90)
|
||||
|
||||
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
|
||||
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
|
||||
eTo0 = Pose2(Rot2(), np.array([1, 0]))
|
||||
eTo1 = Pose2(Rot2(), np.array([2, 0]))
|
||||
eTo2 = Pose2(Rot2(), np.array([4, 0]))
|
||||
|
||||
eToi_list = [eTo0, eTo1, eTo2]
|
||||
|
||||
# Create destination poses
|
||||
# (same three objects, but instead living in the world/city "w" frame)
|
||||
wTo0 = Pose2(R90, np.array([0, 12]))
|
||||
wTo1 = Pose2(R90, np.array([0, 14]))
|
||||
wTo2 = Pose2(R90, np.array([0, 18]))
|
||||
|
||||
wToi_list = [wTo0, wTo1, wTo2]
|
||||
|
||||
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
|
||||
|
||||
# Recover the transformation wSe (i.e. world_S_egovehicle)
|
||||
wSe = Similarity2.Align(we_pairs)
|
||||
|
||||
for wToi, eToi in zip(wToi_list, eToi_list):
|
||||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses2_scaled_squares(self):
|
||||
"""Test if Align Pose2Pairs method can account for gauge ambiguity.
|
||||
|
||||
Make sure a big and small square can be aligned.
|
||||
The u's represent a big square (10x10), and the v's represent a small square (4x4).
|
||||
|
||||
Scenario:
|
||||
4 object poses
|
||||
with gauge ambiguity (2.5x scale)
|
||||
"""
|
||||
# 0, 90, 180, and 270 degrees yaw
|
||||
R0 = Rot2.fromDegrees(0)
|
||||
R90 = Rot2.fromDegrees(90)
|
||||
R180 = Rot2.fromDegrees(180)
|
||||
R270 = Rot2.fromDegrees(270)
|
||||
|
||||
aTi0 = Pose2(R0, np.array([2, 3]))
|
||||
aTi1 = Pose2(R90, np.array([12, 3]))
|
||||
aTi2 = Pose2(R180, np.array([12, 13]))
|
||||
aTi3 = Pose2(R270, np.array([2, 13]))
|
||||
|
||||
aTi_list = [aTi0, aTi1, aTi2, aTi3]
|
||||
|
||||
bTi0 = Pose2(R0, np.array([4, 3]))
|
||||
bTi1 = Pose2(R90, np.array([8, 3]))
|
||||
bTi2 = Pose2(R180, np.array([8, 7]))
|
||||
bTi3 = Pose2(R270, np.array([4, 7]))
|
||||
|
||||
bTi_list = [bTi0, bTi1, bTi2, bTi3]
|
||||
|
||||
ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list)))
|
||||
|
||||
# Recover the transformation aSb (i.e. a_S_b)
|
||||
aSb = Similarity2.Align(ab_pairs)
|
||||
|
||||
for aTi, bTi in zip(aTi_list, bTi_list):
|
||||
self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi))
|
||||
|
||||
def test_align_poses3_along_straight_line(self):
|
||||
"""Test Align Pose3Pairs method.
|
||||
|
||||
Scenario:
|
||||
3 object poses
|
||||
same scale (no gauge ambiguity)
|
||||
world frame has poses rotated about x-axis (90 degree roll)
|
||||
world and egovehicle frame translated by 15 meters w.r.t. each other
|
||||
"""
|
||||
Rx90 = Rot3.Rx(np.deg2rad(90))
|
||||
|
||||
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
|
||||
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
|
||||
eTo0 = Pose3(Rot3(), np.array([5, 0, 0]))
|
||||
eTo1 = Pose3(Rot3(), np.array([10, 0, 0]))
|
||||
eTo2 = Pose3(Rot3(), np.array([15, 0, 0]))
|
||||
|
||||
eToi_list = [eTo0, eTo1, eTo2]
|
||||
|
||||
# Create destination poses
|
||||
# (same three objects, but instead living in the world/city "w" frame)
|
||||
wTo0 = Pose3(Rx90, np.array([-10, 0, 0]))
|
||||
wTo1 = Pose3(Rx90, np.array([-5, 0, 0]))
|
||||
wTo2 = Pose3(Rx90, np.array([0, 0, 0]))
|
||||
|
||||
wToi_list = [wTo0, wTo1, wTo2]
|
||||
|
||||
we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list)))
|
||||
|
||||
# Recover the transformation wSe (i.e. world_S_egovehicle)
|
||||
wSe = Similarity3.Align(we_pairs)
|
||||
|
||||
for wToi, eToi in zip(wToi_list, eToi_list):
|
||||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses3_along_straight_line_gauge(self):
|
||||
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
|
||||
|
||||
Scenario:
|
||||
3 object poses
|
||||
with gauge ambiguity (2x scale)
|
||||
world frame has poses rotated about z-axis (90 degree yaw)
|
||||
world and egovehicle frame translated by 11 meters w.r.t. each other
|
||||
"""
|
||||
Rz90 = Rot3.Rz(np.deg2rad(90))
|
||||
|
||||
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
|
||||
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
|
||||
eTo0 = Pose3(Rot3(), np.array([1, 0, 0]))
|
||||
eTo1 = Pose3(Rot3(), np.array([2, 0, 0]))
|
||||
eTo2 = Pose3(Rot3(), np.array([4, 0, 0]))
|
||||
|
||||
eToi_list = [eTo0, eTo1, eTo2]
|
||||
|
||||
# Create destination poses
|
||||
# (same three objects, but instead living in the world/city "w" frame)
|
||||
wTo0 = Pose3(Rz90, np.array([0, 12, 0]))
|
||||
wTo1 = Pose3(Rz90, np.array([0, 14, 0]))
|
||||
wTo2 = Pose3(Rz90, np.array([0, 18, 0]))
|
||||
|
||||
wToi_list = [wTo0, wTo1, wTo2]
|
||||
|
||||
we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list)))
|
||||
|
||||
# Recover the transformation wSe (i.e. world_S_egovehicle)
|
||||
wSe = Similarity3.Align(we_pairs)
|
||||
|
||||
for wToi, eToi in zip(wToi_list, eToi_list):
|
||||
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
|
||||
|
||||
def test_align_poses3_scaled_squares(self):
|
||||
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
|
||||
|
||||
Make sure a big and small square can be aligned.
|
||||
The u's represent a big square (10x10), and the v's represent a small square (4x4).
|
||||
|
||||
Scenario:
|
||||
4 object poses
|
||||
with gauge ambiguity (2.5x scale)
|
||||
"""
|
||||
# 0, 90, 180, and 270 degrees yaw
|
||||
R0 = Rot3.Rz(np.deg2rad(0))
|
||||
R90 = Rot3.Rz(np.deg2rad(90))
|
||||
R180 = Rot3.Rz(np.deg2rad(180))
|
||||
R270 = Rot3.Rz(np.deg2rad(270))
|
||||
|
||||
aTi0 = Pose3(R0, np.array([2, 3, 0]))
|
||||
aTi1 = Pose3(R90, np.array([12, 3, 0]))
|
||||
aTi2 = Pose3(R180, np.array([12, 13, 0]))
|
||||
aTi3 = Pose3(R270, np.array([2, 13, 0]))
|
||||
|
||||
aTi_list = [aTi0, aTi1, aTi2, aTi3]
|
||||
|
||||
bTi0 = Pose3(R0, np.array([4, 3, 0]))
|
||||
bTi1 = Pose3(R90, np.array([8, 3, 0]))
|
||||
bTi2 = Pose3(R180, np.array([8, 7, 0]))
|
||||
bTi3 = Pose3(R270, np.array([4, 7, 0]))
|
||||
|
||||
bTi_list = [bTi0, bTi1, bTi2, bTi3]
|
||||
|
||||
ab_pairs = gtsam.Pose3Pairs(list(zip(aTi_list, bTi_list)))
|
||||
|
||||
# Recover the transformation aSb (i.e. a_S_b)
|
||||
aSb = Similarity3.Align(ab_pairs)
|
||||
|
||||
for aTi, bTi in zip(aTi_list, bTi_list):
|
||||
self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi))
|
||||
|
||||
def generate_measurements(
|
||||
self,
|
||||
calibration: Union[Cal3Bundler, Cal3_S2],
|
||||
camera_model: Union[PinholeCameraCal3Bundler, PinholeCameraCal3_S2],
|
||||
cal_params: Iterable[Iterable[Union[int, float]]],
|
||||
camera_set: Optional[Union[CameraSetCal3Bundler,
|
||||
CameraSetCal3_S2]] = None,
|
||||
) -> Tuple[List[Point2], Union[CameraSetCal3Bundler, CameraSetCal3_S2,
|
||||
List[Cal3Bundler], List[Cal3_S2]]]:
|
||||
"""
|
||||
Generate vector of measurements for given calibration and camera model.
|
||||
|
||||
Args:
|
||||
calibration: Camera calibration e.g. Cal3_S2
|
||||
camera_model: Camera model e.g. PinholeCameraCal3_S2
|
||||
cal_params: Iterable of camera parameters for `calibration` e.g. [K1, K2]
|
||||
camera_set: Cameraset object (for individual calibrations)
|
||||
|
||||
Returns:
|
||||
list of measurements and list/CameraSet object for cameras
|
||||
"""
|
||||
if camera_set is not None:
|
||||
cameras = camera_set()
|
||||
else:
|
||||
cameras = []
|
||||
measurements = gtsam.Point2Vector()
|
||||
|
||||
for k, pose in zip(cal_params, self.triangulation_poses):
|
||||
K = calibration(*k)
|
||||
camera = camera_model(pose, K)
|
||||
cameras.append(camera)
|
||||
z = camera.project(self.landmark)
|
||||
measurements.append(z)
|
||||
|
||||
return measurements, cameras
|
||||
|
||||
def test_TriangulationExample(self) -> None:
|
||||
"""Tests triangulation with shared Cal3_S2 calibration"""
|
||||
# Some common constants
|
||||
sharedCal = (1500, 1200, 0, 640, 480)
|
||||
|
||||
measurements, _ = self.generate_measurements(
|
||||
calibration=Cal3_S2,
|
||||
camera_model=PinholeCameraCal3_S2,
|
||||
cal_params=(sharedCal, sharedCal))
|
||||
|
||||
triangulated_landmark = gtsam.triangulatePoint3(
|
||||
self.triangulation_poses,
|
||||
Cal3_S2(sharedCal),
|
||||
measurements,
|
||||
rank_tol=1e-9,
|
||||
optimize=True)
|
||||
self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9)
|
||||
|
||||
# Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
|
||||
measurements_noisy = gtsam.Point2Vector()
|
||||
measurements_noisy.append(measurements[0] - np.array([0.1, 0.5]))
|
||||
measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3]))
|
||||
|
||||
triangulated_landmark = gtsam.triangulatePoint3(
|
||||
self.triangulation_poses,
|
||||
Cal3_S2(sharedCal),
|
||||
measurements_noisy,
|
||||
rank_tol=1e-9,
|
||||
optimize=True)
|
||||
|
||||
self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-2)
|
||||
|
||||
def test_triangulation_robust_three_poses(self) -> None:
|
||||
"""Ensure triangulation with a robust model works."""
|
||||
sharedCal = Cal3_S2(1500, 1200, 0, 640, 480)
|
||||
|
||||
# landmark ~5 meters in front of camera
|
||||
landmark = Point3(5, 0.5, 1.2)
|
||||
|
||||
pose1 = Pose3(UPRIGHT, Point3(0, 0, 1))
|
||||
pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0))
|
||||
pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -0.1))
|
||||
|
||||
camera1 = PinholeCameraCal3_S2(pose1, sharedCal)
|
||||
camera2 = PinholeCameraCal3_S2(pose2, sharedCal)
|
||||
camera3 = PinholeCameraCal3_S2(pose3, sharedCal)
|
||||
|
||||
z1: Point2 = camera1.project(landmark)
|
||||
z2: Point2 = camera2.project(landmark)
|
||||
z3: Point2 = camera3.project(landmark)
|
||||
|
||||
poses = gtsam.Pose3Vector([pose1, pose2, pose3])
|
||||
measurements = gtsam.Point2Vector([z1, z2, z3])
|
||||
|
||||
# noise free, so should give exactly the landmark
|
||||
actual = gtsam.triangulatePoint3(poses,
|
||||
sharedCal,
|
||||
measurements,
|
||||
rank_tol=1e-9,
|
||||
optimize=False)
|
||||
self.assertTrue(np.allclose(landmark, actual, atol=1e-2))
|
||||
|
||||
# Add outlier
|
||||
measurements[0] += Point2(100, 120) # very large pixel noise!
|
||||
|
||||
# now estimate does not match landmark
|
||||
actual2 = gtsam.triangulatePoint3(poses,
|
||||
sharedCal,
|
||||
measurements,
|
||||
rank_tol=1e-9,
|
||||
optimize=False)
|
||||
# DLT is surprisingly robust, but still off (actual error is around 0.26m)
|
||||
self.assertTrue(np.linalg.norm(landmark - actual2) >= 0.2)
|
||||
self.assertTrue(np.linalg.norm(landmark - actual2) <= 0.5)
|
||||
|
||||
# Again with nonlinear optimization
|
||||
actual3 = gtsam.triangulatePoint3(poses,
|
||||
sharedCal,
|
||||
measurements,
|
||||
rank_tol=1e-9,
|
||||
optimize=True)
|
||||
# result from nonlinear (but non-robust optimization) is close to DLT and still off
|
||||
self.assertTrue(np.allclose(actual2, actual3, atol=0.1))
|
||||
|
||||
# Again with nonlinear optimization, this time with robust loss
|
||||
model = gtsam.noiseModel.Robust.Create(
|
||||
gtsam.noiseModel.mEstimator.Huber.Create(1.345),
|
||||
gtsam.noiseModel.Unit.Create(2))
|
||||
actual4 = gtsam.triangulatePoint3(poses,
|
||||
sharedCal,
|
||||
measurements,
|
||||
rank_tol=1e-9,
|
||||
optimize=True,
|
||||
model=model)
|
||||
# using the Huber loss we now have a quite small error!! nice!
|
||||
self.assertTrue(np.allclose(landmark, actual4, atol=0.05))
|
||||
|
||||
def test_outliers_and_far_landmarks(self) -> None:
|
||||
"""Check safe triangulation function."""
|
||||
pose1, pose2 = self.poses
|
||||
|
||||
K1 = Cal3_S2(1500, 1200, 0, 640, 480)
|
||||
# create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
camera1 = PinholeCameraCal3_S2(pose1, K1)
|
||||
|
||||
# create second camera 1 meter to the right of first camera
|
||||
K2 = Cal3_S2(1600, 1300, 0, 650, 440)
|
||||
camera2 = PinholeCameraCal3_S2(pose2, K2)
|
||||
|
||||
# 1. Project two landmarks into two cameras and triangulate
|
||||
z1 = camera1.project(self.landmark)
|
||||
z2 = camera2.project(self.landmark)
|
||||
|
||||
cameras = CameraSetCal3_S2()
|
||||
cameras.append(camera1)
|
||||
cameras.append(camera2)
|
||||
|
||||
measurements = gtsam.Point2Vector()
|
||||
measurements.append(z1)
|
||||
measurements.append(z2)
|
||||
|
||||
landmarkDistanceThreshold = 10 # landmark is closer than that
|
||||
# all default except landmarkDistanceThreshold:
|
||||
params = TriangulationParameters(1.0, False, landmarkDistanceThreshold)
|
||||
actual: TriangulationResult = gtsam.triangulateSafe(
|
||||
cameras, measurements, params)
|
||||
self.gtsamAssertEquals(actual.get(), self.landmark, 1e-2)
|
||||
self.assertTrue(actual.valid())
|
||||
|
||||
landmarkDistanceThreshold = 4 # landmark is farther than that
|
||||
params2 = TriangulationParameters(1.0, False,
|
||||
landmarkDistanceThreshold)
|
||||
actual = gtsam.triangulateSafe(cameras, measurements, params2)
|
||||
self.assertTrue(actual.farPoint())
|
||||
|
||||
# 3. Add a slightly rotated third camera above with a wrong measurement
|
||||
# (OUTLIER)
|
||||
pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1))
|
||||
K3 = Cal3_S2(700, 500, 0, 640, 480)
|
||||
camera3 = PinholeCameraCal3_S2(pose3, K3)
|
||||
z3 = camera3.project(self.landmark)
|
||||
|
||||
cameras.append(camera3)
|
||||
measurements.append(z3 + Point2(10, -10))
|
||||
|
||||
landmarkDistanceThreshold = 10 # landmark is closer than that
|
||||
outlierThreshold = 100 # loose, the outlier is going to pass
|
||||
params3 = TriangulationParameters(1.0, False,
|
||||
landmarkDistanceThreshold,
|
||||
outlierThreshold)
|
||||
actual = gtsam.triangulateSafe(cameras, measurements, params3)
|
||||
self.assertTrue(actual.valid())
|
||||
|
||||
# now set stricter threshold for outlier rejection
|
||||
outlierThreshold = 5 # tighter, the outlier is not going to pass
|
||||
params4 = TriangulationParameters(1.0, False,
|
||||
landmarkDistanceThreshold,
|
||||
outlierThreshold)
|
||||
actual = gtsam.triangulateSafe(cameras, measurements, params4)
|
||||
self.assertTrue(actual.outlier())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
@ -13,7 +13,7 @@ if (NOT GTSAM_USE_BOOST_FEATURES)
|
|||
endif()
|
||||
|
||||
if (NOT GTSAM_ENABLE_BOOST_SERIALIZATION)
|
||||
list(APPEND excluded_tests "testSerializationSLAM.cpp")
|
||||
list(APPEND excluded_tests "testSerializationSlam.cpp")
|
||||
endif()
|
||||
|
||||
# Build tests
|
||||
|
|