Merge branch 'develop' into fix-1496

release/4.3a0
Varun Agrawal 2023-06-26 14:45:46 -04:00
commit c605a5b211
68 changed files with 1581 additions and 1017 deletions


@ -21,24 +21,15 @@ jobs:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [
#TODO This build fails, need to understand why.
# windows-2016-cl,
windows-2019-cl,
]
build_type: [
Debug,
#TODO(Varun) The release build takes over 2.5 hours, need to figure out why.
# Release
Release
]
build_unstable: [ON]
include:
#TODO This build fails, need to understand why.
# - name: windows-2016-cl
# os: windows-2016
# compiler: cl
# platform: 32
- name: windows-2019-cl
os: windows-2019
compiler: cl
@ -125,4 +116,3 @@ jobs:
# Run GTSAM_UNSTABLE tests
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable


@ -41,6 +41,7 @@ class DSFMap {
std::map<KEY, This::Set> sets();
};
// Used in Matlab wrapper
class IndexPairSet {
IndexPairSet();
// common STL methods
@ -54,6 +55,7 @@ class IndexPairSet {
bool count(gtsam::IndexPair key) const; // returns true if value exists
};
// Used in Matlab wrapper
class IndexPairVector {
IndexPairVector();
IndexPairVector(const gtsam::IndexPairVector& other);
@ -70,6 +72,7 @@ class IndexPairVector {
gtsam::IndexPairVector IndexPairSetAsArray(gtsam::IndexPairSet& set);
// Used in Matlab wrapper
class IndexPairSetMap {
IndexPairSetMap();
// common STL methods

gtsam/basis/Basis.cpp (new file, 33 lines)

@ -0,0 +1,33 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Basis.cpp
* @brief Compute an interpolating basis
* @author Varun Agrawal
* @date June 20, 2023
*/
#include <gtsam/basis/Basis.h>
namespace gtsam {
Matrix kroneckerProductIdentity(size_t M, const Weights& w) {
Matrix result(M, w.cols() * M);
result.setZero();
for (int i = 0; i < w.cols(); i++) {
result.block(0, i * M, M, M).diagonal().array() = w(i);
}
return result;
}
} // namespace gtsam
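Usage sketch: what the new runtime-sized helper returns for a small case, assuming a program built against GTSAM with this change; block i of the result is w(i) times the MxM identity, i.e. the Kronecker product of w with I_M.

#include <gtsam/basis/Basis.h>
#include <iostream>

int main() {
  gtsam::Weights w(3);  // 1x3 row of basis weights
  w << 1.0, 2.0, 3.0;
  // Prints the 2x6 matrix [1*I2  2*I2  3*I2], i.e. w (Kronecker) I_2:
  std::cout << gtsam::kroneckerProductIdentity(2, w) << std::endl;
  return 0;
}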


@ -20,7 +20,6 @@
#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <iostream>
@ -81,16 +80,7 @@ using Weights = Eigen::Matrix<double, 1, -1>; /* 1xN vector */
*
* @ingroup basis
*/
template <size_t M>
Matrix kroneckerProductIdentity(const Weights& w) {
Matrix result(M, w.cols() * M);
result.setZero();
for (int i = 0; i < w.cols(); i++) {
result.block(0, i * M, M, M).diagonal().array() = w(i);
}
return result;
}
Matrix kroneckerProductIdentity(size_t M, const Weights& w);
/**
* CRTP Base class for function bases
@ -169,18 +159,18 @@ class Basis {
};
/**
* VectorEvaluationFunctor at a given x, applied to ParameterMatrix<M>.
* VectorEvaluationFunctor at a given x, applied to a parameter Matrix.
* This functor is used to evaluate a parameterized function at a given scalar
* value x. When given specific M*N parameters, it returns the M-vector of the M
* corresponding function values at x, possibly with Jacobians w.r.t. the parameters.
*/
template <int M>
class VectorEvaluationFunctor : protected EvaluationFunctor {
protected:
using VectorM = Eigen::Matrix<double, M, 1>;
using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
using Jacobian = Eigen::Matrix<double, /*MxMN*/ -1, -1>;
Jacobian H_;
size_t M_;
/**
* Calculate the `M*(M*N)` Jacobian of this functor with respect to
* the M*N parameter matrix `P`.
@ -190,7 +180,7 @@ class Basis {
* i.e., the Kronecker product of weights_ with the MxM identity matrix.
*/
void calculateJacobian() {
H_ = kroneckerProductIdentity<M>(this->weights_);
H_ = kroneckerProductIdentity(M_, this->weights_);
}
public:
@ -200,26 +190,27 @@ class Basis {
VectorEvaluationFunctor() {}
/// Default Constructor
VectorEvaluationFunctor(size_t N, double x) : EvaluationFunctor(N, x) {
VectorEvaluationFunctor(size_t M, size_t N, double x)
: EvaluationFunctor(N, x), M_(M) {
calculateJacobian();
}
/// Constructor, with interval [a,b]
VectorEvaluationFunctor(size_t N, double x, double a, double b)
: EvaluationFunctor(N, x, a, b) {
VectorEvaluationFunctor(size_t M, size_t N, double x, double a, double b)
: EvaluationFunctor(N, x, a, b), M_(M) {
calculateJacobian();
}
/// M-dimensional evaluation
VectorM apply(const ParameterMatrix<M>& P,
OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
Vector apply(const Matrix& P,
OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
if (H) *H = H_;
return P.matrix() * this->weights_.transpose();
}
/// c++ sugar
VectorM operator()(const ParameterMatrix<M>& P,
OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
Vector operator()(const Matrix& P,
OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
return apply(P, H);
}
};
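Usage sketch of the runtime-M evaluation functor; it mirrors the updated tests further down in this diff, and the sizes, interval, and x below are illustrative.

#include <gtsam/basis/Chebyshev2.h>

void evaluateExample() {
  using namespace gtsam;
  const size_t M = 2, N = 3;
  const double x = 0.5, a = 0.0, b = 1.0;
  // Was Chebyshev2::VectorEvaluationFunctor<2> fx(N, x, a, b);
  Chebyshev2::VectorEvaluationFunctor fx(M, N, x, a, b);
  Matrix P = Matrix::Zero(M, N);  // plain Matrix replaces ParameterMatrix<M>
  Matrix H(M, M * N);             // MxMN Jacobian w.r.t. the parameters
  Vector v = fx(P, H);            // M-vector of function values at x
}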
@ -231,13 +222,14 @@ class Basis {
*
* This component is specified by the row index i, with 0<i<M.
*/
template <int M>
class VectorComponentFunctor : public EvaluationFunctor {
protected:
using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
size_t rowIndex_;
Jacobian H_;
size_t M_;
size_t rowIndex_;
/*
* Calculate the `1*(M*N)` Jacobian of this functor with respect to
* the M*N parameter matrix `P`.
@ -248,9 +240,9 @@ class Basis {
* MxM identity matrix. See also VectorEvaluationFunctor.
*/
void calculateJacobian(size_t N) {
H_.setZero(1, M * N);
H_.setZero(1, M_ * N);
for (int j = 0; j < EvaluationFunctor::weights_.size(); j++)
H_(0, rowIndex_ + j * M) = EvaluationFunctor::weights_(j);
H_(0, rowIndex_ + j * M_) = EvaluationFunctor::weights_(j);
}
public:
@ -258,33 +250,34 @@ class Basis {
VectorComponentFunctor() {}
/// Construct with row index
VectorComponentFunctor(size_t N, size_t i, double x)
: EvaluationFunctor(N, x), rowIndex_(i) {
VectorComponentFunctor(size_t M, size_t N, size_t i, double x)
: EvaluationFunctor(N, x), M_(M), rowIndex_(i) {
calculateJacobian(N);
}
/// Construct with row index and interval
VectorComponentFunctor(size_t N, size_t i, double x, double a, double b)
: EvaluationFunctor(N, x, a, b), rowIndex_(i) {
VectorComponentFunctor(size_t M, size_t N, size_t i, double x, double a,
double b)
: EvaluationFunctor(N, x, a, b), M_(M), rowIndex_(i) {
calculateJacobian(N);
}
/// Calculate component of component rowIndex_ of P
double apply(const ParameterMatrix<M>& P,
double apply(const Matrix& P,
OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
if (H) *H = H_;
return P.row(rowIndex_) * EvaluationFunctor::weights_.transpose();
}
/// c++ sugar
double operator()(const ParameterMatrix<M>& P,
double operator()(const Matrix& P,
OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
return apply(P, H);
}
};
/**
* Manifold EvaluationFunctor at a given x, applied to ParameterMatrix<M>.
* Manifold EvaluationFunctor at a given x, applied to a parameter Matrix.
* This functor is used to evaluate a parameterized function at a given scalar
* value x. When given specific M*N parameters, it returns the M-vector of the M
* corresponding function values at x, possibly with Jacobians w.r.t. the parameters.
@ -297,25 +290,23 @@ class Basis {
* 3D rotation.
*/
template <class T>
class ManifoldEvaluationFunctor
: public VectorEvaluationFunctor<traits<T>::dimension> {
class ManifoldEvaluationFunctor : public VectorEvaluationFunctor {
enum { M = traits<T>::dimension };
using Base = VectorEvaluationFunctor<M>;
using Base = VectorEvaluationFunctor;
public:
/// For serialization
ManifoldEvaluationFunctor() {}
/// Default Constructor
ManifoldEvaluationFunctor(size_t N, double x) : Base(N, x) {}
ManifoldEvaluationFunctor(size_t N, double x) : Base(M, N, x) {}
/// Constructor, with interval [a,b]
ManifoldEvaluationFunctor(size_t N, double x, double a, double b)
: Base(N, x, a, b) {}
: Base(M, N, x, a, b) {}
/// Manifold evaluation
T apply(const ParameterMatrix<M>& P,
OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
T apply(const Matrix& P, OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
// Interpolate the M-dimensional vector to yield a vector in tangent space
Eigen::Matrix<double, M, 1> xi = Base::operator()(P, H);
@ -333,7 +324,7 @@ class Basis {
}
/// c++ sugar
T operator()(const ParameterMatrix<M>& P,
T operator()(const Matrix& P,
OptionalJacobian</*MxN*/ -1, -1> H = {}) const {
return apply(P, H); // might call apply in derived
}
@ -389,20 +380,20 @@ class Basis {
};
/**
* VectorDerivativeFunctor at a given x, applied to ParameterMatrix<M>.
* VectorDerivativeFunctor at a given x, applied to a parameter Matrix.
*
* This functor is used to evaluate the derivatives of a parameterized
* function at a given scalar value x. When given specific M*N parameters, it
* returns the M-vector of the M corresponding function derivatives at x, possibly
* with Jacobians w.r.t. the parameters.
*/
template <int M>
class VectorDerivativeFunctor : protected DerivativeFunctorBase {
protected:
using VectorM = Eigen::Matrix<double, M, 1>;
using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
using Jacobian = Eigen::Matrix<double, /*MxMN*/ -1, -1>;
Jacobian H_;
size_t M_;
/**
* Calculate the `M*(M*N)` Jacobian of this functor with respect to
* the M*N parameter matrix `P`.
@ -412,7 +403,7 @@ class Basis {
* i.e., the Kronecker product of weights_ with the MxM identity matrix.
*/
void calculateJacobian() {
H_ = kroneckerProductIdentity<M>(this->weights_);
H_ = kroneckerProductIdentity(M_, this->weights_);
}
public:
@ -422,25 +413,25 @@ class Basis {
VectorDerivativeFunctor() {}
/// Default Constructor
VectorDerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {
VectorDerivativeFunctor(size_t M, size_t N, double x)
: DerivativeFunctorBase(N, x), M_(M) {
calculateJacobian();
}
/// Constructor, with optional interval [a,b]
VectorDerivativeFunctor(size_t N, double x, double a, double b)
: DerivativeFunctorBase(N, x, a, b) {
VectorDerivativeFunctor(size_t M, size_t N, double x, double a, double b)
: DerivativeFunctorBase(N, x, a, b), M_(M) {
calculateJacobian();
}
VectorM apply(const ParameterMatrix<M>& P,
OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
Vector apply(const Matrix& P,
OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
if (H) *H = H_;
return P.matrix() * this->weights_.transpose();
}
/// c++ sugar
VectorM operator()(
const ParameterMatrix<M>& P,
OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
Vector operator()(const Matrix& P,
OptionalJacobian</*MxMN*/ -1, -1> H = {}) const {
return apply(P, H);
}
};
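Usage sketch of the runtime-M derivative functor, again with illustrative sizes and interval (the Chebyshev2.h include from the previous sketch is assumed).

gtsam::Vector derivativeExample() {
  using namespace gtsam;
  // Was Chebyshev2::VectorDerivativeFunctor<2> d(N, x, a, b);
  Chebyshev2::VectorDerivativeFunctor d(/*M=*/2, /*N=*/3, /*x=*/0.5, /*a=*/0.0, /*b=*/3.0);
  Matrix X = Matrix::Zero(2, 3);  // M values at each of the N Chebyshev points
  return d(X);                    // M-vector of derivatives at x
}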
@ -452,13 +443,14 @@ class Basis {
*
* This component is specified by the row index i, with 0<i<M.
*/
template <int M>
class ComponentDerivativeFunctor : protected DerivativeFunctorBase {
protected:
using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
size_t rowIndex_;
Jacobian H_;
size_t M_;
size_t rowIndex_;
/*
* Calculate the `1*(M*N)` Jacobian of this functor with respect to
* the M*N parameter matrix `P`.
@ -469,9 +461,9 @@ class Basis {
* MxM identity matrix. See also VectorDerivativeFunctor.
*/
void calculateJacobian(size_t N) {
H_.setZero(1, M * N);
H_.setZero(1, M_ * N);
for (int j = 0; j < this->weights_.size(); j++)
H_(0, rowIndex_ + j * M) = this->weights_(j);
H_(0, rowIndex_ + j * M_) = this->weights_(j);
}
public:
@ -479,29 +471,29 @@ class Basis {
ComponentDerivativeFunctor() {}
/// Construct with row index
ComponentDerivativeFunctor(size_t N, size_t i, double x)
: DerivativeFunctorBase(N, x), rowIndex_(i) {
ComponentDerivativeFunctor(size_t M, size_t N, size_t i, double x)
: DerivativeFunctorBase(N, x), M_(M), rowIndex_(i) {
calculateJacobian(N);
}
/// Construct with row index and interval
ComponentDerivativeFunctor(size_t N, size_t i, double x, double a, double b)
: DerivativeFunctorBase(N, x, a, b), rowIndex_(i) {
ComponentDerivativeFunctor(size_t M, size_t N, size_t i, double x, double a,
double b)
: DerivativeFunctorBase(N, x, a, b), M_(M), rowIndex_(i) {
calculateJacobian(N);
}
/// Calculate derivative of component rowIndex_ of F
double apply(const ParameterMatrix<M>& P,
double apply(const Matrix& P,
OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
if (H) *H = H_;
return P.row(rowIndex_) * this->weights_.transpose();
}
/// c++ sugar
double operator()(const ParameterMatrix<M>& P,
double operator()(const Matrix& P,
OptionalJacobian</*1xMN*/ -1, -1> H = {}) const {
return apply(P, H);
}
};
};
} // namespace gtsam


@ -75,7 +75,7 @@ class EvaluationFactor : public FunctorizedFactor<double, Vector> {
};
/**
* Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
* Unary factor for enforcing BASIS polynomial evaluation on a parameter Matrix
* of size (M, N) is equal to a vector-valued measurement at the same point, when
* using a pseudo-spectral parameterization.
@ -87,15 +87,13 @@ class EvaluationFactor : public FunctorizedFactor<double, Vector> {
* measurement prediction function.
*
* @param BASIS: The basis class to use e.g. Chebyshev2
* @param M: Size of the evaluated state vector.
*
* @ingroup basis
*/
template <class BASIS, int M>
class VectorEvaluationFactor
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
template <class BASIS>
class VectorEvaluationFactor : public FunctorizedFactor<Vector, Matrix> {
private:
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
using Base = FunctorizedFactor<Vector, Matrix>;
public:
VectorEvaluationFactor() {}
@ -103,42 +101,43 @@ class VectorEvaluationFactor
/**
* @brief Construct a new VectorEvaluationFactor object.
*
* @param key The key to the ParameterMatrix object used to represent the
* @param key The key to the parameter Matrix object used to represent the
* polynomial.
* @param z The measurement value.
* @param model The noise model.
* @param M Size of the evaluated state vector.
* @param N The degree of the polynomial.
* @param x The point at which to evaluate the basis polynomial.
*/
VectorEvaluationFactor(Key key, const Vector &z,
const SharedNoiseModel &model, const size_t N,
double x)
: Base(key, z, model,
typename BASIS::template VectorEvaluationFunctor<M>(N, x)) {}
const SharedNoiseModel &model, const size_t M,
const size_t N, double x)
: Base(key, z, model, typename BASIS::VectorEvaluationFunctor(M, N, x)) {}
/**
* @brief Construct a new VectorEvaluationFactor object.
*
* @param key The key to the ParameterMatrix object used to represent the
* @param key The key to the parameter Matrix object used to represent the
* polynomial.
* @param z The measurement value.
* @param model The noise model.
* @param M Size of the evaluated state vector.
* @param N The degree of the polynomial.
* @param x The point at which to evaluate the basis polynomial.
* @param a Lower bound for the polynomial.
* @param b Upper bound for the polynomial.
*/
VectorEvaluationFactor(Key key, const Vector &z,
const SharedNoiseModel &model, const size_t N,
double x, double a, double b)
const SharedNoiseModel &model, const size_t M,
const size_t N, double x, double a, double b)
: Base(key, z, model,
typename BASIS::template VectorEvaluationFunctor<M>(N, x, a, b)) {}
typename BASIS::VectorEvaluationFunctor(M, N, x, a, b)) {}
virtual ~VectorEvaluationFactor() {}
};
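Usage sketch with the new argument order (M now precedes N) and a plain gtsam::Matrix stored in Values, as the updated tests below do; the key, measurement, and sizes are illustrative.

#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

void factorExample() {
  using namespace gtsam;
  using symbol_shorthand::X;
  const size_t M = 3, N = 4;
  auto model = noiseModel::Isotropic::Sigma(M, 1.0);
  // Was VectorEvaluationFactor<Chebyshev2, M>(key, z, model, N, x);
  VectorEvaluationFactor<Chebyshev2> factor(X(0), Vector::Zero(M), model, M, N, /*x=*/0.5);
  NonlinearFactorGraph graph;
  graph.add(factor);
  Values initial;
  initial.insert<Matrix>(X(0), Matrix::Zero(M, N));  // was ParameterMatrix<M>(N)
}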
/**
* Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
* Unary factor for enforcing BASIS polynomial evaluation on a parameter Matrix
* of size (P, N) is equal to specified measurement at the same point, when
* using a pseudo-spectral parameterization.
*
@ -147,20 +146,18 @@ class VectorEvaluationFactor
* indexed by `i`.
*
* @param BASIS: The basis class to use e.g. Chebyshev2
* @param P: Size of the fixed-size vector.
*
* Example:
* VectorComponentFactor<BASIS, P> controlPrior(key, measured, model,
* N, i, t, a, b);
* VectorComponentFactor<BASIS> controlPrior(key, measured, model,
* N, i, t, a, b);
* where N is the degree and i is the component index.
*
* @ingroup basis
*/
template <class BASIS, size_t P>
class VectorComponentFactor
: public FunctorizedFactor<double, ParameterMatrix<P>> {
template <class BASIS>
class VectorComponentFactor : public FunctorizedFactor<double, Matrix> {
private:
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
using Base = FunctorizedFactor<double, Matrix>;
public:
VectorComponentFactor() {}
@ -168,29 +165,31 @@ class VectorComponentFactor
/**
* @brief Construct a new VectorComponentFactor object.
*
* @param key The key to the ParameterMatrix object used to represent the
* @param key The key to the parameter Matrix object used to represent the
* polynomial.
* @param z The scalar value at a specified index `i` of the full measurement
* vector.
* @param model The noise model.
* @param P Size of the fixed-size vector.
* @param N The degree of the polynomial.
* @param i The index for the evaluated vector to give us the desired scalar
* value.
* @param x The point at which to evaluate the basis polynomial.
*/
VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
const size_t N, size_t i, double x)
const size_t P, const size_t N, size_t i, double x)
: Base(key, z, model,
typename BASIS::template VectorComponentFunctor<P>(N, i, x)) {}
typename BASIS::VectorComponentFunctor(P, N, i, x)) {}
/**
* @brief Construct a new VectorComponentFactor object.
*
* @param key The key to the ParameterMatrix object used to represent the
* @param key The key to the parameter Matrix object used to represent the
* polynomial.
* @param z The scalar value at a specified index `i` of the full measurement
* vector.
* @param model The noise model.
* @param P Size of the fixed-size vector.
* @param N The degree of the polynomial.
* @param i The index for the evaluated vector to give us the desired scalar
* value.
@ -199,11 +198,10 @@ class VectorComponentFactor
* @param b Upper bound for the polynomial.
*/
VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
const size_t N, size_t i, double x, double a, double b)
: Base(
key, z, model,
typename BASIS::template VectorComponentFunctor<P>(N, i, x, a, b)) {
}
const size_t P, const size_t N, size_t i, double x,
double a, double b)
: Base(key, z, model,
typename BASIS::VectorComponentFunctor(P, N, i, x, a, b)) {}
virtual ~VectorComponentFactor() {}
};
@ -226,10 +224,9 @@ class VectorComponentFactor
* where `x` is the value (e.g. timestep) at which the rotation was evaluated.
*/
template <class BASIS, typename T>
class ManifoldEvaluationFactor
: public FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>> {
class ManifoldEvaluationFactor : public FunctorizedFactor<T, Matrix> {
private:
using Base = FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>>;
using Base = FunctorizedFactor<T, Matrix>;
public:
ManifoldEvaluationFactor() {}
@ -289,7 +286,7 @@ class DerivativeFactor
/**
* @brief Construct a new DerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* @param key The key to the parameter Matrix which represents the basis
* polynomial.
* @param z The measurement value.
* @param model The noise model.
@ -303,7 +300,7 @@ class DerivativeFactor
/**
* @brief Construct a new DerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* @param key The key to the parameter Matrix which represents the basis
* polynomial.
* @param z The measurement value.
* @param model The noise model.
@ -324,14 +321,12 @@ class DerivativeFactor
* polynomial at a specified point `x` is equal to the vector value `z`.
*
* @param BASIS: The basis class to use e.g. Chebyshev2
* @param M: Size of the evaluated state vector derivative.
*/
template <class BASIS, int M>
class VectorDerivativeFactor
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
template <class BASIS>
class VectorDerivativeFactor : public FunctorizedFactor<Vector, Matrix> {
private:
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
using Func = typename BASIS::template VectorDerivativeFunctor<M>;
using Base = FunctorizedFactor<Vector, Matrix>;
using Func = typename BASIS::VectorDerivativeFunctor;
public:
VectorDerivativeFactor() {}
@ -339,34 +334,36 @@ class VectorDerivativeFactor
/**
* @brief Construct a new VectorDerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* @param key The key to the parameter Matrix which represents the basis
* polynomial.
* @param z The measurement value.
* @param model The noise model.
* @param M Size of the evaluated state vector derivative.
* @param N The degree of the polynomial.
* @param x The point at which to evaluate the basis polynomial.
*/
VectorDerivativeFactor(Key key, const Vector &z,
const SharedNoiseModel &model, const size_t N,
double x)
: Base(key, z, model, Func(N, x)) {}
const SharedNoiseModel &model, const size_t M,
const size_t N, double x)
: Base(key, z, model, Func(M, N, x)) {}
/**
* @brief Construct a new VectorDerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* @param key The key to the parameter Matrix which represents the basis
* polynomial.
* @param z The measurement value.
* @param model The noise model.
* @param M Size of the evaluated state vector derivative.
* @param N The degree of the polynomial.
* @param x The point at which to evaluate the basis polynomial.
* @param a Lower bound for the polynomial.
* @param b Upper bound for the polynomial.
*/
VectorDerivativeFactor(Key key, const Vector &z,
const SharedNoiseModel &model, const size_t N,
double x, double a, double b)
: Base(key, z, model, Func(N, x, a, b)) {}
const SharedNoiseModel &model, const size_t M,
const size_t N, double x, double a, double b)
: Base(key, z, model, Func(M, N, x, a, b)) {}
virtual ~VectorDerivativeFactor() {}
};
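The derivative factor follows the same convention; a minimal sketch, with includes as in the previous sketch and illustrative sizes.

gtsam::VectorDerivativeFactor<gtsam::Chebyshev2> makeVectorDerivativePrior(gtsam::Key key) {
  const size_t M = 3, N = 4;
  auto model = gtsam::noiseModel::Isotropic::Sigma(M, 1.0);
  // Was VectorDerivativeFactor<Chebyshev2, M>(key, z, model, N, x);
  return gtsam::VectorDerivativeFactor<gtsam::Chebyshev2>(
      key, gtsam::Vector::Zero(M), model, M, N, /*x=*/0.5);
}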
@ -377,14 +374,12 @@ class VectorDerivativeFactor
* vector-valued measurement `z`.
*
* @param BASIS: The basis class to use e.g. Chebyshev2
* @param P: Size of the control component derivative.
*/
template <class BASIS, int P>
class ComponentDerivativeFactor
: public FunctorizedFactor<double, ParameterMatrix<P>> {
template <class BASIS>
class ComponentDerivativeFactor : public FunctorizedFactor<double, Matrix> {
private:
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
using Func = typename BASIS::template ComponentDerivativeFunctor<P>;
using Base = FunctorizedFactor<double, Matrix>;
using Func = typename BASIS::ComponentDerivativeFunctor;
public:
ComponentDerivativeFactor() {}
@ -392,29 +387,31 @@ class ComponentDerivativeFactor
/**
* @brief Construct a new ComponentDerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* @param key The key to the parameter Matrix which represents the basis
* polynomial.
* @param z The scalar measurement value at a specific index `i` of the full
* measurement vector.
* @param model The noise model.
* @param P: Size of the control component derivative.
* @param N The degree of the polynomial.
* @param i The index for the evaluated vector to give us the desired scalar
* value.
* @param x The point at which to evaluate the basis polynomial.
*/
ComponentDerivativeFactor(Key key, const double &z,
const SharedNoiseModel &model, const size_t N,
size_t i, double x)
: Base(key, z, model, Func(N, i, x)) {}
const SharedNoiseModel &model, const size_t P,
const size_t N, size_t i, double x)
: Base(key, z, model, Func(P, N, i, x)) {}
/**
* @brief Construct a new ComponentDerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* @param key The key to the parameter Matrix which represents the basis
* polynomial.
* @param z The scalar measurement value at a specific index `i` of the full
* measurement vector.
* @param model The noise model.
* @param P: Size of the control component derivative.
* @param N The degree of the polynomial.
* @param i The index for the evaluated vector to give us the desired scalar
* value.
@ -423,9 +420,10 @@ class ComponentDerivativeFactor
* @param b Upper bound for the polynomial.
*/
ComponentDerivativeFactor(Key key, const double &z,
const SharedNoiseModel &model, const size_t N,
size_t i, double x, double a, double b)
: Base(key, z, model, Func(N, i, x, a, b)) {}
const SharedNoiseModel &model, const size_t P,
const size_t N, size_t i, double x, double a,
double b)
: Base(key, z, model, Func(P, N, i, x, a, b)) {}
virtual ~ComponentDerivativeFactor() {}
};
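And for the component derivative factor, which takes a 1-dimensional noise model and the row index i; values below are illustrative.

gtsam::ComponentDerivativeFactor<gtsam::Chebyshev2> makeComponentDerivativePrior(gtsam::Key key) {
  auto model = gtsam::noiseModel::Isotropic::Sigma(1, 1.0);
  // Was ComponentDerivativeFactor<Chebyshev2, P>(key, z, model, N, i, x);
  return gtsam::ComponentDerivativeFactor<gtsam::Chebyshev2>(
      key, 0.0, model, /*P=*/3, /*N=*/4, /*i=*/1, /*x=*/0.5);
}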


@ -1,215 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file ParameterMatrix.h
* @brief Define ParameterMatrix class which is used to store values at
* interpolation points.
* @author Varun Agrawal, Frank Dellaert
* @date September 21, 2020
*/
#pragma once
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/VectorSpace.h>
#include <iostream>
namespace gtsam {
/**
* A matrix abstraction of MxN values at the Basis points.
* This class serves as a wrapper over an Eigen matrix.
* @tparam M: The dimension of the type you wish to evaluate.
* @param N: the number of Basis points (e.g. Chebyshev points of the second
* kind).
*/
template <int M>
class ParameterMatrix {
using MatrixType = Eigen::Matrix<double, M, -1>;
private:
MatrixType matrix_;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
enum { dimension = Eigen::Dynamic };
/**
* Create ParameterMatrix using the number of basis points.
* @param N: The number of basis points (the columns).
*/
ParameterMatrix(const size_t N) : matrix_(M, N) { matrix_.setZero(); }
/**
* Create ParameterMatrix from an MxN Eigen Matrix.
* @param matrix: An Eigen matrix used to initialze the ParameterMatrix.
*/
ParameterMatrix(const MatrixType& matrix) : matrix_(matrix) {}
/// Get the number of rows.
size_t rows() const { return matrix_.rows(); }
/// Get the number of columns.
size_t cols() const { return matrix_.cols(); }
/// Get the underlying matrix.
MatrixType matrix() const { return matrix_; }
/// Return the tranpose of the underlying matrix.
Eigen::Matrix<double, -1, M> transpose() const { return matrix_.transpose(); }
/**
* Get the matrix row specified by `index`.
* @param index: The row index to retrieve.
*/
Eigen::Matrix<double, 1, -1> row(size_t index) const {
return matrix_.row(index);
}
/**
* Set the matrix row specified by `index`.
* @param index: The row index to set.
*/
auto row(size_t index) -> Eigen::Block<MatrixType, 1, -1, false> {
return matrix_.row(index);
}
/**
* Get the matrix column specified by `index`.
* @param index: The column index to retrieve.
*/
Eigen::Matrix<double, M, 1> col(size_t index) const {
return matrix_.col(index);
}
/**
* Set the matrix column specified by `index`.
* @param index: The column index to set.
*/
auto col(size_t index) -> Eigen::Block<MatrixType, M, 1, true> {
return matrix_.col(index);
}
/**
* Set all matrix coefficients to zero.
*/
void setZero() { matrix_.setZero(); }
/**
* Add a ParameterMatrix to another.
* @param other: ParameterMatrix to add.
*/
ParameterMatrix<M> operator+(const ParameterMatrix<M>& other) const {
return ParameterMatrix<M>(matrix_ + other.matrix());
}
/**
* Add a MxN-sized vector to the ParameterMatrix.
* @param other: Vector which is reshaped and added.
*/
ParameterMatrix<M> operator+(
const Eigen::Matrix<double, -1, 1>& other) const {
// This form avoids a deep copy and instead typecasts `other`.
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
return ParameterMatrix<M>(matrix_ + other_);
}
/**
* Subtract a ParameterMatrix from another.
* @param other: ParameterMatrix to subtract.
*/
ParameterMatrix<M> operator-(const ParameterMatrix<M>& other) const {
return ParameterMatrix<M>(matrix_ - other.matrix());
}
/**
* Subtract a MxN-sized vector from the ParameterMatrix.
* @param other: Vector which is reshaped and subracted.
*/
ParameterMatrix<M> operator-(
const Eigen::Matrix<double, -1, 1>& other) const {
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
return ParameterMatrix<M>(matrix_ - other_);
}
/**
* Multiply ParameterMatrix with an Eigen matrix.
* @param other: Eigen matrix which should be multiplication compatible with
* the ParameterMatrix.
*/
MatrixType operator*(const Eigen::Matrix<double, -1, -1>& other) const {
return matrix_ * other;
}
/// @name Vector Space requirements
/// @{
/**
* Print the ParameterMatrix.
* @param s: The prepend string to add more contextual info.
*/
void print(const std::string& s = "") const {
std::cout << (s == "" ? s : s + " ") << matrix_ << std::endl;
}
/**
* Check for equality up to absolute tolerance.
* @param other: The ParameterMatrix to check equality with.
* @param tol: The absolute tolerance threshold.
*/
bool equals(const ParameterMatrix<M>& other, double tol = 1e-8) const {
return gtsam::equal_with_abs_tol(matrix_, other.matrix(), tol);
}
/// Returns dimensionality of the tangent space
inline size_t dim() const { return matrix_.size(); }
/// Convert to vector form, is done row-wise
inline Vector vector() const {
using RowMajor = Eigen::Matrix<double, -1, -1, Eigen::RowMajor>;
Vector result(matrix_.size());
Eigen::Map<RowMajor>(&result(0), rows(), cols()) = matrix_;
return result;
}
/** Identity function to satisfy VectorSpace traits.
*
* NOTE: The size at compile time is unknown so this identity is zero
* length and thus not valid.
*/
inline static ParameterMatrix Identity() {
// throw std::runtime_error(
// "ParameterMatrix::Identity(): Don't use this function");
return ParameterMatrix(0);
}
/// @}
};
// traits for ParameterMatrix
template <int M>
struct traits<ParameterMatrix<M>>
: public internal::VectorSpace<ParameterMatrix<M>> {};
/* ************************************************************************* */
// Stream operator that takes a ParameterMatrix. Used for printing.
template <int M>
inline std::ostream& operator<<(std::ostream& os,
const ParameterMatrix<M>& parameterMatrix) {
os << parameterMatrix.matrix();
return os;
}
} // namespace gtsam


@ -46,18 +46,6 @@ class Chebyshev2 {
static Matrix DifferentiationMatrix(size_t N, double a, double b);
};
#include <gtsam/basis/ParameterMatrix.h>
template <M = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}>
class ParameterMatrix {
ParameterMatrix(const size_t N);
ParameterMatrix(const Matrix& matrix);
Matrix matrix() const;
void print(const string& s = "") const;
};
#include <gtsam/basis/BasisFactors.h>
template <BASIS = {gtsam::Chebyshev2, gtsam::Chebyshev1Basis,
@ -72,45 +60,36 @@ virtual class EvaluationFactor : gtsam::NoiseModelFactor {
double x, double a, double b);
};
template <BASIS, M>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor {
VectorEvaluationFactor();
VectorEvaluationFactor(gtsam::Key key, const Vector& z,
const gtsam::noiseModel::Base* model, const size_t N,
double x);
const gtsam::noiseModel::Base* model, const size_t M,
const size_t N, double x);
VectorEvaluationFactor(gtsam::Key key, const Vector& z,
const gtsam::noiseModel::Base* model, const size_t N,
double x, double a, double b);
const gtsam::noiseModel::Base* model, const size_t M,
const size_t N, double x, double a, double b);
};
// TODO(Varun) Better way to support arbitrary dimensions?
// Especially if users mainly do `pip install gtsam` for the Python wrapper.
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 3>
VectorEvaluationFactorChebyshev2D3;
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 4>
VectorEvaluationFactorChebyshev2D4;
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 12>
VectorEvaluationFactorChebyshev2D12;
template <BASIS, P>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class VectorComponentFactor : gtsam::NoiseModelFactor {
VectorComponentFactor();
VectorComponentFactor(gtsam::Key key, const double z,
const gtsam::noiseModel::Base* model, const size_t N,
size_t i, double x);
const gtsam::noiseModel::Base* model, const size_t M,
const size_t N, size_t i, double x);
VectorComponentFactor(gtsam::Key key, const double z,
const gtsam::noiseModel::Base* model, const size_t N,
size_t i, double x, double a, double b);
const gtsam::noiseModel::Base* model, const size_t M,
const size_t N, size_t i, double x, double a, double b);
};
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 3>
VectorComponentFactorChebyshev2D3;
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 4>
VectorComponentFactorChebyshev2D4;
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 12>
VectorComponentFactorChebyshev2D12;
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Pose3.h>
template <BASIS, T>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
gtsam::Chebyshev2Basis, gtsam::Chebyshev2},
T = {gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3}>
virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor {
ManifoldEvaluationFactor();
ManifoldEvaluationFactor(gtsam::Key key, const T& z,
@ -121,8 +100,42 @@ virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor {
double x, double a, double b);
};
// TODO(gerry): Add `DerivativeFactor`, `VectorDerivativeFactor`, and
// `ComponentDerivativeFactor`
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class DerivativeFactor : gtsam::NoiseModelFactor {
DerivativeFactor();
DerivativeFactor(gtsam::Key key, const double z,
const gtsam::noiseModel::Base* model, const size_t N,
double x);
DerivativeFactor(gtsam::Key key, const double z,
const gtsam::noiseModel::Base* model, const size_t N,
double x, double a, double b);
};
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class VectorDerivativeFactor : gtsam::NoiseModelFactor {
VectorDerivativeFactor();
VectorDerivativeFactor(gtsam::Key key, const Vector& z,
const gtsam::noiseModel::Base* model, const size_t M,
const size_t N, double x);
VectorDerivativeFactor(gtsam::Key key, const Vector& z,
const gtsam::noiseModel::Base* model, const size_t M,
const size_t N, double x, double a, double b);
};
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
virtual class ComponentDerivativeFactor : gtsam::NoiseModelFactor {
ComponentDerivativeFactor();
ComponentDerivativeFactor(gtsam::Key key, const double z,
const gtsam::noiseModel::Base* model,
const size_t P, const size_t N, size_t i, double x);
ComponentDerivativeFactor(gtsam::Key key, const double z,
const gtsam::noiseModel::Base* model,
const size_t P, const size_t N, size_t i, double x,
double a, double b);
};
#include <gtsam/basis/FitBasis.h>
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,


@ -17,30 +17,28 @@
* @brief unit tests for factors in BasisFactors.h
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/Vector.h>
#include <gtsam/basis/Basis.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/Vector.h>
#include <CppUnitLite/TestHarness.h>
using gtsam::noiseModel::Isotropic;
using gtsam::Pose2;
using gtsam::Vector;
using gtsam::Values;
using gtsam::Chebyshev2;
using gtsam::ParameterMatrix;
using gtsam::LevenbergMarquardtParams;
using gtsam::LevenbergMarquardtOptimizer;
using gtsam::LevenbergMarquardtParams;
using gtsam::NonlinearFactorGraph;
using gtsam::NonlinearOptimizerParams;
using gtsam::Pose2;
using gtsam::Values;
using gtsam::Vector;
using gtsam::noiseModel::Isotropic;
constexpr size_t N = 2;
@ -81,15 +79,15 @@ TEST(BasisFactors, VectorEvaluationFactor) {
const Vector measured = Vector::Zero(M);
auto model = Isotropic::Sigma(M, 1.0);
VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);
VectorEvaluationFactor<Chebyshev2> factor(key, measured, model, M, N, 0);
NonlinearFactorGraph graph;
graph.add(factor);
ParameterMatrix<M> stateMatrix(N);
gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N);
Values initial;
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
initial.insert<gtsam::Matrix>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
@ -107,7 +105,7 @@ TEST(BasisFactors, Print) {
const Vector measured = Vector::Ones(M) * 42;
auto model = Isotropic::Sigma(M, 1.0);
VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);
VectorEvaluationFactor<Chebyshev2> factor(key, measured, model, M, N, 0);
std::string expected =
" keys = { X0 }\n"
@ -128,16 +126,16 @@ TEST(BasisFactors, VectorComponentFactor) {
const size_t i = 2;
const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0;
auto model = Isotropic::Sigma(1, 1.0);
VectorComponentFactor<Chebyshev2, P> factor(key, measured, model, N, i,
t, a, b);
VectorComponentFactor<Chebyshev2> factor(key, measured, model, P, N, i, t, a,
b);
NonlinearFactorGraph graph;
graph.add(factor);
ParameterMatrix<P> stateMatrix(N);
gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(P, N);
Values initial;
initial.insert<ParameterMatrix<P>>(key, stateMatrix);
initial.insert<gtsam::Matrix>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
@ -153,16 +151,16 @@ TEST(BasisFactors, ManifoldEvaluationFactor) {
const Pose2 measured;
const double t = 3.0, a = 2.0, b = 4.0;
auto model = Isotropic::Sigma(3, 1.0);
ManifoldEvaluationFactor<Chebyshev2, Pose2> factor(key, measured, model, N,
t, a, b);
ManifoldEvaluationFactor<Chebyshev2, Pose2> factor(key, measured, model, N, t,
a, b);
NonlinearFactorGraph graph;
graph.add(factor);
ParameterMatrix<3> stateMatrix(N);
gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(3, N);
Values initial;
initial.insert<ParameterMatrix<3>>(key, stateMatrix);
initial.insert<gtsam::Matrix>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
@ -170,6 +168,8 @@ TEST(BasisFactors, ManifoldEvaluationFactor) {
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
// Check Jacobians
EXPECT_CORRECT_FACTOR_JACOBIANS(factor, initial, 1e-7, 1e-5);
}
//******************************************************************************
@ -179,15 +179,15 @@ TEST(BasisFactors, VecDerivativePrior) {
const Vector measured = Vector::Zero(M);
auto model = Isotropic::Sigma(M, 1.0);
VectorDerivativeFactor<Chebyshev2, M> vecDPrior(key, measured, model, N, 0);
VectorDerivativeFactor<Chebyshev2> vecDPrior(key, measured, model, M, N, 0);
NonlinearFactorGraph graph;
graph.add(vecDPrior);
ParameterMatrix<M> stateMatrix(N);
gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N);
Values initial;
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
initial.insert<gtsam::Matrix>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
@ -204,15 +204,15 @@ TEST(BasisFactors, ComponentDerivativeFactor) {
double measured = 0;
auto model = Isotropic::Sigma(1, 1.0);
ComponentDerivativeFactor<Chebyshev2, M> controlDPrior(key, measured, model,
N, 0, 0);
ComponentDerivativeFactor<Chebyshev2> controlDPrior(key, measured, model, M,
N, 0, 0);
NonlinearFactorGraph graph;
graph.add(controlDPrior);
Values initial;
ParameterMatrix<M> stateMatrix(N);
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N);
initial.insert<gtsam::Matrix>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);


@ -17,14 +17,15 @@
* methods
*/
#include <cstddef>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/base/Testable.h>
#include <CppUnitLite/TestHarness.h>
#include <cstddef>
#include <functional>
using namespace std;
@ -107,7 +108,7 @@ TEST(Chebyshev2, InterpolateVector) {
double t = 30, a = 0, b = 100;
const size_t N = 3;
// Create 2x3 matrix with Vectors at Chebyshev points
ParameterMatrix<2> X(N);
Matrix X = Matrix::Zero(2, N);
X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp
// Check value
@ -115,14 +116,15 @@ TEST(Chebyshev2, InterpolateVector) {
expected << t, 0;
Eigen::Matrix<double, /*2x2N*/ -1, -1> actualH(2, 2 * N);
Chebyshev2::VectorEvaluationFunctor<2> fx(N, t, a, b);
Chebyshev2::VectorEvaluationFunctor fx(2, N, t, a, b);
EXPECT(assert_equal(expected, fx(X, actualH), 1e-9));
// Check derivative
std::function<Vector2(ParameterMatrix<2>)> f = std::bind(
&Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, std::placeholders::_1, nullptr);
std::function<Vector2(Matrix)> f =
std::bind(&Chebyshev2::VectorEvaluationFunctor::operator(), fx,
std::placeholders::_1, nullptr);
Matrix numericalH =
numericalDerivative11<Vector2, ParameterMatrix<2>, 2 * N>(f, X);
numericalDerivative11<Vector2, Matrix, 2 * N>(f, X);
EXPECT(assert_equal(numericalH, actualH, 1e-9));
}
@ -131,25 +133,66 @@ TEST(Chebyshev2, InterpolateVector) {
TEST(Chebyshev2, InterpolatePose2) {
double t = 30, a = 0, b = 100;
ParameterMatrix<3> X(N);
Matrix X(3, N);
X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp
X.row(1) = Vector::Zero(N);
X.row(2) = 0.1 * Vector::Ones(N);
Vector xi(3);
xi << t, 0, 0.1;
Eigen::Matrix<double, /*3x3N*/ -1, -1> actualH(3, 3 * N);
Chebyshev2::ManifoldEvaluationFunctor<Pose2> fx(N, t, a, b);
// We use xi as canonical coordinates via exponential map
Pose2 expected = Pose2::ChartAtOrigin::Retract(xi);
EXPECT(assert_equal(expected, fx(X)));
EXPECT(assert_equal(expected, fx(X, actualH)));
// Check derivative
std::function<Pose2(Matrix)> f =
std::bind(&Chebyshev2::ManifoldEvaluationFunctor<Pose2>::operator(), fx,
std::placeholders::_1, nullptr);
Matrix numericalH =
numericalDerivative11<Pose2, Matrix, 3 * N>(f, X);
EXPECT(assert_equal(numericalH, actualH, 1e-9));
}
#ifdef GTSAM_POSE3_EXPMAP
//******************************************************************************
// Interpolating poses using the exponential map
TEST(Chebyshev2, InterpolatePose3) {
double a = 10, b = 100;
double t = Chebyshev2::Points(N, a, b)(11);
Rot3 R = Rot3::Ypr(-2.21366492e-05, -9.35353636e-03, -5.90463598e-04);
Pose3 pose(R, Point3(1, 2, 3));
Vector6 xi = Pose3::ChartAtOrigin::Local(pose);
Eigen::Matrix<double, /*6x6N*/ -1, -1> actualH(6, 6 * N);
Matrix X = Matrix::Zero(6, N);
X.col(11) = xi;
Chebyshev2::ManifoldEvaluationFunctor<Pose3> fx(N, t, a, b);
// We use xi as canonical coordinates via exponential map
Pose3 expected = Pose3::ChartAtOrigin::Retract(xi);
EXPECT(assert_equal(expected, fx(X, actualH)));
// Check derivative
std::function<Pose3(Matrix)> f =
std::bind(&Chebyshev2::ManifoldEvaluationFunctor<Pose3>::operator(), fx,
std::placeholders::_1, nullptr);
Matrix numericalH =
numericalDerivative11<Pose3, Matrix, 6 * N>(f, X);
EXPECT(assert_equal(numericalH, actualH, 1e-8));
}
#endif
//******************************************************************************
TEST(Chebyshev2, Decomposition) {
// Create example sequence
Sequence sequence;
for (size_t i = 0; i < 16; i++) {
double x = (1.0/ 16)*i - 0.99, y = x;
double x = (1.0 / 16) * i - 0.99, y = x;
sequence[x] = y;
}
@ -370,14 +413,14 @@ TEST(Chebyshev2, Derivative6_03) {
TEST(Chebyshev2, VectorDerivativeFunctor) {
const size_t N = 3, M = 2;
const double x = 0.2;
using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
VecD fx(N, x, 0, 3);
ParameterMatrix<M> X(N);
using VecD = Chebyshev2::VectorDerivativeFunctor;
VecD fx(M, N, x, 0, 3);
Matrix X = Matrix::Zero(M, N);
Matrix actualH(M, M * N);
EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8));
// Test Jacobian
Matrix expectedH = numericalDerivative11<Vector2, ParameterMatrix<M>, M * N>(
Matrix expectedH = numericalDerivative11<Vector2, Matrix, M * N>(
std::bind(&VecD::operator(), fx, std::placeholders::_1, nullptr), X);
EXPECT(assert_equal(expectedH, actualH, 1e-7));
}
@ -386,30 +429,29 @@ TEST(Chebyshev2, VectorDerivativeFunctor) {
// Test VectorDerivativeFunctor with polynomial function
TEST(Chebyshev2, VectorDerivativeFunctor2) {
const size_t N = 64, M = 1, T = 15;
using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
using VecD = Chebyshev2::VectorDerivativeFunctor;
const Vector points = Chebyshev2::Points(N, 0, T);
// Assign the parameter matrix
Vector values(N);
// Assign the parameter matrix 1xN
Matrix X(1, N);
for (size_t i = 0; i < N; ++i) {
values(i) = f(points(i));
X(i) = f(points(i));
}
ParameterMatrix<M> X(values);
// Evaluate the derivative at the chebyshev points using
// VectorDerivativeFunctor.
for (size_t i = 0; i < N; ++i) {
VecD d(N, points(i), 0, T);
VecD d(M, N, points(i), 0, T);
Vector1 Dx = d(X);
EXPECT_DOUBLES_EQUAL(fprime(points(i)), Dx(0), 1e-6);
}
// Test Jacobian at the first chebyshev point.
Matrix actualH(M, M * N);
VecD vecd(N, points(0), 0, T);
VecD vecd(M, N, points(0), 0, T);
vecd(X, actualH);
Matrix expectedH = numericalDerivative11<Vector1, ParameterMatrix<M>, M * N>(
Matrix expectedH = numericalDerivative11<Vector1, Matrix, M * N>(
std::bind(&VecD::operator(), vecd, std::placeholders::_1, nullptr), X);
EXPECT(assert_equal(expectedH, actualH, 1e-6));
}
@ -419,14 +461,14 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) {
TEST(Chebyshev2, ComponentDerivativeFunctor) {
const size_t N = 6, M = 2;
const double x = 0.2;
using CompFunc = Chebyshev2::ComponentDerivativeFunctor<M>;
using CompFunc = Chebyshev2::ComponentDerivativeFunctor;
size_t row = 1;
CompFunc fx(N, row, x, 0, 3);
ParameterMatrix<M> X(N);
CompFunc fx(M, N, row, x, 0, 3);
Matrix X = Matrix::Zero(M, N);
Matrix actualH(1, M * N);
EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8);
Matrix expectedH = numericalDerivative11<double, ParameterMatrix<M>, M * N>(
Matrix expectedH = numericalDerivative11<double, Matrix, M * N>(
std::bind(&CompFunc::operator(), fx, std::placeholders::_1, nullptr), X);
EXPECT(assert_equal(expectedH, actualH, 1e-7));
}


@ -180,17 +180,16 @@ TEST(Basis, Derivative7) {
//******************************************************************************
TEST(Basis, VecDerivativeFunctor) {
using DotShape = typename FourierBasis::VectorDerivativeFunctor<2>;
using DotShape = typename FourierBasis::VectorDerivativeFunctor;
const size_t N = 3;
// MATLAB example, Dec 27 2019, commit 014eded5
double h = 2 * M_PI / 16;
Vector2 dotShape(0.5556, -0.8315); // at h/2
DotShape dotShapeFunction(N, h / 2);
Matrix23 theta_mat = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071)
.finished()
.transpose();
ParameterMatrix<2> theta(theta_mat);
DotShape dotShapeFunction(2, N, h / 2);
Matrix theta = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071)
.finished()
.transpose();
EXPECT(assert_equal(Vector(dotShape), dotShapeFunction(theta), 1e-4));
}


@ -1,145 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testParameterMatrix.cpp
* @date Sep 22, 2020
* @author Varun Agrawal, Frank Dellaert
* @brief Unit tests for ParameterMatrix.h
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <gtsam/inference/Symbol.h>
using namespace std;
using namespace gtsam;
using Parameters = Chebyshev2::Parameters;
const size_t M = 2, N = 5;
//******************************************************************************
TEST(ParameterMatrix, Constructor) {
ParameterMatrix<2> actual1(3);
ParameterMatrix<2> expected1(Matrix::Zero(2, 3));
EXPECT(assert_equal(expected1, actual1));
ParameterMatrix<2> actual2(Matrix::Ones(2, 3));
ParameterMatrix<2> expected2(Matrix::Ones(2, 3));
EXPECT(assert_equal(expected2, actual2));
EXPECT(assert_equal(expected2.matrix(), actual2.matrix()));
}
//******************************************************************************
TEST(ParameterMatrix, Dimensions) {
ParameterMatrix<M> params(N);
EXPECT_LONGS_EQUAL(params.rows(), M);
EXPECT_LONGS_EQUAL(params.cols(), N);
EXPECT_LONGS_EQUAL(params.dim(), M * N);
}
//******************************************************************************
TEST(ParameterMatrix, Getters) {
ParameterMatrix<M> params(N);
Matrix expectedMatrix = Matrix::Zero(2, 5);
EXPECT(assert_equal(expectedMatrix, params.matrix()));
Matrix expectedMatrixTranspose = Matrix::Zero(5, 2);
EXPECT(assert_equal(expectedMatrixTranspose, params.transpose()));
ParameterMatrix<M> p2(Matrix::Ones(M, N));
Vector expectedRowVector = Vector::Ones(N);
for (size_t r = 0; r < M; ++r) {
EXPECT(assert_equal(p2.row(r), expectedRowVector));
}
ParameterMatrix<M> p3(2 * Matrix::Ones(M, N));
Vector expectedColVector = 2 * Vector::Ones(M);
for (size_t c = 0; c < M; ++c) {
EXPECT(assert_equal(p3.col(c), expectedColVector));
}
}
//******************************************************************************
TEST(ParameterMatrix, Setters) {
ParameterMatrix<M> params(Matrix::Zero(M, N));
Matrix expected = Matrix::Zero(M, N);
// row
params.row(0) = Vector::Ones(N);
expected.row(0) = Vector::Ones(N);
EXPECT(assert_equal(expected, params.matrix()));
// col
params.col(2) = Vector::Ones(M);
expected.col(2) = Vector::Ones(M);
EXPECT(assert_equal(expected, params.matrix()));
// setZero
params.setZero();
expected.setZero();
EXPECT(assert_equal(expected, params.matrix()));
}
//******************************************************************************
TEST(ParameterMatrix, Addition) {
ParameterMatrix<M> params(Matrix::Ones(M, N));
ParameterMatrix<M> expected(2 * Matrix::Ones(M, N));
// Add vector
EXPECT(assert_equal(expected, params + Vector::Ones(M * N)));
// Add another ParameterMatrix
ParameterMatrix<M> actual = params + ParameterMatrix<M>(Matrix::Ones(M, N));
EXPECT(assert_equal(expected, actual));
}
//******************************************************************************
TEST(ParameterMatrix, Subtraction) {
ParameterMatrix<M> params(2 * Matrix::Ones(M, N));
ParameterMatrix<M> expected(Matrix::Ones(M, N));
// Subtract vector
EXPECT(assert_equal(expected, params - Vector::Ones(M * N)));
// Subtract another ParameterMatrix
ParameterMatrix<M> actual = params - ParameterMatrix<M>(Matrix::Ones(M, N));
EXPECT(assert_equal(expected, actual));
}
//******************************************************************************
TEST(ParameterMatrix, Multiplication) {
ParameterMatrix<M> params(Matrix::Ones(M, N));
Matrix multiplier = 2 * Matrix::Ones(N, 2);
Matrix expected = 2 * N * Matrix::Ones(M, 2);
EXPECT(assert_equal(expected, params * multiplier));
}
//******************************************************************************
TEST(ParameterMatrix, VectorSpace) {
ParameterMatrix<M> params(Matrix::Ones(M, N));
// vector
EXPECT(assert_equal(Vector::Ones(M * N), params.vector()));
// identity
EXPECT(assert_equal(ParameterMatrix<M>::Identity(),
ParameterMatrix<M>(Matrix::Zero(M, 0))));
}
//******************************************************************************
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
//******************************************************************************


@ -21,7 +21,6 @@
#include <gtsam/discrete/TableFactor.h>
#include <gtsam/hybrid/HybridValues.h>
#include <boost/format.hpp>
#include <utility>
using namespace std;
@ -203,7 +202,7 @@ void TableFactor::print(const string& s, const KeyFormatter& formatter) const {
cout << s;
cout << " f[";
for (auto&& key : keys())
cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key);
cout << " (" << formatter(key) << "," << cardinality(key) << "),";
cout << " ]" << endl;
for (SparseIt it(sparse_table_); it; ++it) {
DiscreteValues assignment = findAssignments(it.index());


@ -258,10 +258,19 @@ public:
inline const Rot2& r() const { return r_; }
/// translation
inline const Point2& translation() const { return t_; }
inline const Point2& translation(OptionalJacobian<2, 3> Hself={}) const {
if (Hself) {
*Hself = Matrix::Zero(2, 3);
(*Hself).block<2, 2>(0, 0) = rotation().matrix();
}
return t_;
}
/// rotation
inline const Rot2& rotation() const { return r_; }
inline const Rot2& rotation(OptionalJacobian<1, 3> Hself={}) const {
if (Hself) *Hself << 0, 0, 1;
return r_;
}
//// return transformation matrix
GTSAM_EXPORT Matrix3 matrix() const;
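Usage sketch of the new Jacobian-returning accessors, matching the Pose2 tests added later in this diff; the pose values are illustrative.

#include <gtsam/geometry/Pose2.h>

void poseJacobianExample() {
  gtsam::Pose2 pose(3.5, -8.2, 4.2);
  gtsam::Matrix Ht, Hr;
  gtsam::Point2 t = pose.translation(Ht);  // Ht is 2x3: [R(theta)  0]
  gtsam::Rot2 R = pose.rotation(Hr);       // Hr is 1x3: [0 0 1]
}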


@ -560,8 +560,8 @@ class GTSAM_EXPORT Rot3 : public LieGroup<Rot3, 3> {
#endif
};
/// std::vector of Rot3s, mainly for wrapper
using Rot3Vector = std::vector<Rot3, Eigen::aligned_allocator<Rot3> >;
/// std::vector of Rot3s, used in Matlab wrapper
using Rot3Vector = std::vector<Rot3, Eigen::aligned_allocator<Rot3>>;
/**
* [RQ] receives a 3 by 3 matrix and returns an upper triangular matrix R


@ -28,7 +28,8 @@ class Point2 {
// enabling serialization functionality
void serialize() const;
};
// Used in Matlab wrapper
class Point2Pairs {
Point2Pairs();
size_t size() const;
@ -38,6 +39,7 @@ class Point2Pairs {
};
// std::vector<gtsam::Point2>
// Used in Matlab wrapper
class Point2Vector {
// Constructors
Point2Vector();
@ -131,6 +133,7 @@ class Point3 {
gtsam::Point3 normalize(const gtsam::Point3 &p, Eigen::Ref<Eigen::MatrixXd> H) const;
};
// Used in Matlab wrapper
class Point3Pairs {
Point3Pairs();
size_t size() const;
@ -431,7 +434,9 @@ class Pose2 {
gtsam::Rot2 bearing(const gtsam::Point2& point) const;
double range(const gtsam::Point2& point) const;
gtsam::Point2 translation() const;
gtsam::Point2 translation(Eigen::Ref<Eigen::MatrixXd> Hself) const;
gtsam::Rot2 rotation() const;
gtsam::Rot2 rotation(Eigen::Ref<Eigen::MatrixXd> Hself) const;
Matrix matrix() const;
// enabling serialization functionality
@ -542,6 +547,7 @@ class Pose3 {
void serialize() const;
};
// Used in Matlab wrapper
class Pose3Pairs {
Pose3Pairs();
size_t size() const;
@ -550,6 +556,7 @@ class Pose3Pairs {
void push_back(const gtsam::Pose3Pair& pose_pair);
};
// Used in Matlab wrapper
class Pose3Vector {
Pose3Vector();
size_t size() const;
@ -1174,8 +1181,8 @@ class TriangulationParameters {
const gtsam::SharedNoiseModel& noiseModel = nullptr);
};
// Templates appear not yet supported for free functions - issue raised at
// borglab/wrap#14 to add support
// Can be templated but overloaded for convenience.
// We can now call `triangulatePoint3` with any template type.
// Cal3_S2 versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,


@ -474,6 +474,33 @@ TEST( Pose2, compose_matrix )
EXPECT(assert_equal(gM1*_1M2,matrix(gT1.compose(_1T2)))); // RIGHT DOES NOT
}
/* ************************************************************************* */
TEST( Pose2, translation ) {
Pose2 pose(3.5, -8.2, 4.2);
Matrix actualH;
EXPECT(assert_equal((Vector2() << 3.5, -8.2).finished(), pose.translation(actualH), 1e-8));
std::function<Point2(const Pose2&)> f = [](const Pose2& T) { return T.translation(); };
Matrix numericalH = numericalDerivative11<Point2, Pose2>(f, pose);
EXPECT(assert_equal(numericalH, actualH, 1e-6));
}
/* ************************************************************************* */
TEST( Pose2, rotation ) {
Pose2 pose(3.5, -8.2, 4.2);
Matrix actualH(4, 3);
EXPECT(assert_equal(Rot2(4.2), pose.rotation(actualH), 1e-8));
std::function<Rot2(const Pose2&)> f = [](const Pose2& T) { return T.rotation(); };
Matrix numericalH = numericalDerivative11<Rot2, Pose2>(f, pose);
EXPECT(assert_equal(numericalH, actualH, 1e-6));
}
/* ************************************************************************* */
TEST( Pose2, between )
{


@ -85,7 +85,8 @@ Vector4 triangulateHomogeneousDLT(
Point3 triangulateLOST(const std::vector<Pose3>& poses,
const Point3Vector& calibratedMeasurements,
const SharedIsotropic& measurementNoise) {
const SharedIsotropic& measurementNoise,
double rank_tol) {
size_t m = calibratedMeasurements.size();
assert(m == poses.size());
@ -96,17 +97,38 @@ Point3 triangulateLOST(const std::vector<Pose3>& poses,
for (size_t i = 0; i < m; i++) {
const Pose3& wTi = poses[i];
// TODO(akshay-krishnan): are there better ways to select j?
const int j = (i + 1) % m;
int j = (i + 1) % m;
const Pose3& wTj = poses[j];
const Point3 d_ij = wTj.translation() - wTi.translation();
Point3 d_ij = wTj.translation() - wTi.translation();
Point3 wZi = wTi.rotation().rotate(calibratedMeasurements[i]);
Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]);
double num_i = wZi.cross(wZj).norm();
double den_i = d_ij.cross(wZj).norm();
const Point3 wZi = wTi.rotation().rotate(calibratedMeasurements[i]);
const Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]);
// Handle q_i = 0 (or NaN), which arises if the measurement vectors, wZi and
// wZj, coincide (or the baseline vector coincides with the jth measurement
// vector).
if (num_i == 0 || den_i == 0) {
bool success = false;
for (size_t k = 2; k < m; k++) {
j = (i + k) % m;
const Pose3& wTj = poses[j];
d_ij = wTj.translation() - wTi.translation();
wZj = wTj.rotation().rotate(calibratedMeasurements[j]);
num_i = wZi.cross(wZj).norm();
den_i = d_ij.cross(wZj).norm();
if (num_i > 0 && den_i > 0) {
success = true;
break;
}
}
if (!success) throw(TriangulationUnderconstrainedException());
}
// Note: Setting q_i = 1.0 gives same results as DLT.
const double q_i = wZi.cross(wZj).norm() /
(measurementNoise->sigma() * d_ij.cross(wZj).norm());
const double q_i = num_i / (measurementNoise->sigma() * den_i);
const Matrix23 coefficientMat =
q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) *
@ -115,7 +137,13 @@ Point3 triangulateLOST(const std::vector<Pose3>& poses,
A.block<2, 3>(2 * i, 0) << coefficientMat;
b.block<2, 1>(2 * i, 0) << coefficientMat * wTi.translation();
}
return A.colPivHouseholderQr().solve(b);
Eigen::ColPivHouseholderQR<Matrix> A_Qr = A.colPivHouseholderQr();
A_Qr.setThreshold(rank_tol);
if (A_Qr.rank() < 3) throw(TriangulationUnderconstrainedException());
return A_Qr.solve(b);
}
Point3 triangulateDLT(
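Written out, the per-row weight that the loop above computes (with wZi, wZj the world-frame measurement directions and d_ij the baseline between poses i and j) is

q_i = \frac{\lVert wZ_i \times wZ_j \rVert}{\sigma \, \lVert d_{ij} \times wZ_j \rVert}

so q_i becomes zero or NaN whenever either cross product vanishes; that is exactly the degenerate case the new fallback loop skips past by trying other pairings j, and, per the in-code note, forcing q_i = 1 reduces the scheme to plain DLT. The added rank check on the column-pivoted QR then rejects systems that remain underconstrained.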

View File

@ -110,7 +110,8 @@ GTSAM_EXPORT Point3 triangulateDLT(
*/
GTSAM_EXPORT Point3 triangulateLOST(const std::vector<Pose3>& poses,
const Point3Vector& calibratedMeasurements,
const SharedIsotropic& measurementNoise);
const SharedIsotropic& measurementNoise,
double rank_tol = 1e-9);
/**
* Create a factor graph with projection factors from poses and one calibration
@ -439,7 +440,8 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
auto calibratedMeasurements =
calibrateMeasurementsShared<CALIBRATION>(*sharedCal, measurements);
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise,
rank_tol);
} else {
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
@ -512,7 +514,8 @@ Point3 triangulatePoint3(const CameraSet<CAMERA>& cameras,
auto calibratedMeasurements =
calibrateMeasurements<CAMERA>(cameras, measurements);
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise,
rank_tol);
} else {
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromCameras(cameras);
@ -750,4 +753,3 @@ using CameraSetCal3Fisheye = CameraSet<PinholeCamera<Cal3Fisheye>>;
using CameraSetCal3Unified = CameraSet<PinholeCamera<Cal3Unified>>;
using CameraSetSpherical = CameraSet<SphericalCamera>;
} // \namespace gtsam

View File

@ -105,6 +105,7 @@ class KeyGroupMap {
};
// Actually a FastSet<FactorIndex>
// Used in Matlab wrapper
class FactorIndexSet {
FactorIndexSet();
FactorIndexSet(const gtsam::FactorIndexSet& set);
@ -121,6 +122,7 @@ class FactorIndexSet {
};
// Actually a vector<FactorIndex>
// Used in Matlab wrapper
class FactorIndices {
FactorIndices();
FactorIndices(const gtsam::FactorIndices& other);

View File

@ -109,6 +109,7 @@ class Ordering {
FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph,
gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}>
static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph);
static gtsam::Ordering Colamd(const gtsam::VariableIndex& variableIndex);
template <
FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph,
@ -176,13 +177,9 @@ class DotWriter {
class VariableIndex {
// Standard Constructors and Named Constructors
VariableIndex();
// TODO: Templetize constructor when wrap supports it
// template<T = {gtsam::FactorGraph}>
// VariableIndex(const T& factorGraph, size_t nVariables);
// VariableIndex(const T& factorGraph);
VariableIndex(const gtsam::SymbolicFactorGraph& sfg);
VariableIndex(const gtsam::GaussianFactorGraph& gfg);
VariableIndex(const gtsam::NonlinearFactorGraph& fg);
template <T = {gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph,
gtsam::NonlinearFactorGraph}>
VariableIndex(const T& factorGraph);
VariableIndex(const gtsam::VariableIndex& other);
// Testable
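A short, hedged Python sketch of how the templated constructor (and the new VariableIndex-based Colamd overload above) reads through the wrapper; the factor and noise values are illustrative:

import gtsam

graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(0, 0, 0),
                                 gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))

# One templated constructor now covers symbolic, Gaussian, and nonlinear graphs.
index = gtsam.VariableIndex(graph)

# The index can be passed straight to COLAMD ordering via the overload above.
ordering = gtsam.Ordering.Colamd(index)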

View File

@ -28,7 +28,7 @@ Vector CustomFactor::unwhitenedError(const Values& x, OptionalMatrixVecType H) c
if(H) {
/*
* In this case, we pass the raw pointer to the `std::vector<Matrix>` object directly to pybind.
* As the type `std::vector<Matrix>` has been marked as opaque in `preamble.h`, any changes in
* As the type `std::vector<Matrix>` has been marked as opaque in `preamble/custom.h`, any changes in
* Python will be immediately reflected on the C++ side.
*
* Example:
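The in-comment example is cut off by this hunk; as a hedged Python sketch of the mechanism being described (toy residual and Jacobian), assigning into H inside the callback is visible to the C++ caller precisely because the JacobianVector is opaque:

from typing import List, Optional

import numpy as np
import gtsam

def error_func(this: gtsam.CustomFactor, v: gtsam.Values,
               H: Optional[List[np.ndarray]]) -> np.ndarray:
    """Toy unary residual on a 1-D Vector variable."""
    x = v.atVector(this.keys()[0])
    if H is not None:
        H[0] = np.eye(1)  # written in place, reflected on the C++ side
    return x - np.array([1.0])

noise = gtsam.noiseModel.Isotropic.Sigma(1, 1.0)
factor = gtsam.CustomFactor(noise, [0], error_func)  # keys given as a plain list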

View File

@ -73,8 +73,7 @@ class GncParams {
double weightsTol = 1e-4; ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
Verbosity verbosity = SILENT; ///< Verbosity level
//TODO(Varun) replace IndexVector with vector<size_t> once pybind11/stl.h is globally enabled.
/// Use IndexVector for inliers and outliers since it is fast + wrapping
/// Use IndexVector for inliers and outliers since it is fast
using IndexVector = FastVector<uint64_t>;
///< Slots in the factor graph corresponding to measurements that we know are inliers
IndexVector knownInliers = IndexVector();

View File

@ -25,7 +25,6 @@ namespace gtsam {
#include <gtsam/geometry/Unit3.h>
#include <gtsam/navigation/ImuBias.h>
#include <gtsam/navigation/NavState.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <gtsam/nonlinear/GraphvizFormatting.h>
class GraphvizFormatting : gtsam::DotWriter {
@ -305,8 +304,8 @@ virtual class GncParams {
double relativeCostTol;
double weightsTol;
gtsam::This::Verbosity verbosity;
gtsam::KeyVector knownInliers;
gtsam::KeyVector knownOutliers;
gtsam::This::IndexVector knownInliers;
gtsam::This::IndexVector knownOutliers;
void setLossType(const gtsam::GncLossType type);
void setMaxIterations(const size_t maxIter);
@ -314,8 +313,8 @@ virtual class GncParams {
void setRelativeCostTol(double value);
void setWeightsTol(double value);
void setVerbosityGNC(const gtsam::This::Verbosity value);
void setKnownInliers(const gtsam::KeyVector& knownIn);
void setKnownOutliers(const gtsam::KeyVector& knownOut);
void setKnownInliers(const gtsam::This::IndexVector& knownIn);
void setKnownOutliers(const gtsam::This::IndexVector& knownOut);
void print(const string& str = "GncParams: ") const;
enum Verbosity {

View File

@ -25,7 +25,6 @@ namespace gtsam {
#include <gtsam/geometry/Unit3.h>
#include <gtsam/navigation/ImuBias.h>
#include <gtsam/navigation/NavState.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <gtsam/linear/VectorValues.h>
@ -96,21 +95,6 @@ class Values {
void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
void insert(size_t j, const gtsam::NavState& nav_state);
void insert(size_t j, double c);
void insert(size_t j, const gtsam::ParameterMatrix<1>& X);
void insert(size_t j, const gtsam::ParameterMatrix<2>& X);
void insert(size_t j, const gtsam::ParameterMatrix<3>& X);
void insert(size_t j, const gtsam::ParameterMatrix<4>& X);
void insert(size_t j, const gtsam::ParameterMatrix<5>& X);
void insert(size_t j, const gtsam::ParameterMatrix<6>& X);
void insert(size_t j, const gtsam::ParameterMatrix<7>& X);
void insert(size_t j, const gtsam::ParameterMatrix<8>& X);
void insert(size_t j, const gtsam::ParameterMatrix<9>& X);
void insert(size_t j, const gtsam::ParameterMatrix<10>& X);
void insert(size_t j, const gtsam::ParameterMatrix<11>& X);
void insert(size_t j, const gtsam::ParameterMatrix<12>& X);
void insert(size_t j, const gtsam::ParameterMatrix<13>& X);
void insert(size_t j, const gtsam::ParameterMatrix<14>& X);
void insert(size_t j, const gtsam::ParameterMatrix<15>& X);
template <T = {gtsam::Point2, gtsam::Point3}>
void insert(size_t j, const T& val);
@ -144,21 +128,6 @@ class Values {
void update(size_t j, Vector vector);
void update(size_t j, Matrix matrix);
void update(size_t j, double c);
void update(size_t j, const gtsam::ParameterMatrix<1>& X);
void update(size_t j, const gtsam::ParameterMatrix<2>& X);
void update(size_t j, const gtsam::ParameterMatrix<3>& X);
void update(size_t j, const gtsam::ParameterMatrix<4>& X);
void update(size_t j, const gtsam::ParameterMatrix<5>& X);
void update(size_t j, const gtsam::ParameterMatrix<6>& X);
void update(size_t j, const gtsam::ParameterMatrix<7>& X);
void update(size_t j, const gtsam::ParameterMatrix<8>& X);
void update(size_t j, const gtsam::ParameterMatrix<9>& X);
void update(size_t j, const gtsam::ParameterMatrix<10>& X);
void update(size_t j, const gtsam::ParameterMatrix<11>& X);
void update(size_t j, const gtsam::ParameterMatrix<12>& X);
void update(size_t j, const gtsam::ParameterMatrix<13>& X);
void update(size_t j, const gtsam::ParameterMatrix<14>& X);
void update(size_t j, const gtsam::ParameterMatrix<15>& X);
void insert_or_assign(size_t j, const gtsam::Point2& point2);
void insert_or_assign(size_t j, const gtsam::Point3& point3);
@ -199,21 +168,6 @@ class Values {
void insert_or_assign(size_t j, Vector vector);
void insert_or_assign(size_t j, Matrix matrix);
void insert_or_assign(size_t j, double c);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<1>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<2>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<3>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<4>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<5>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<6>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<7>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<8>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<9>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<10>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<11>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<12>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<13>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<14>& X);
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<15>& X);
template <T = {gtsam::Point2,
gtsam::Point3,
@ -243,22 +197,7 @@ class Values {
gtsam::NavState,
Vector,
Matrix,
double,
gtsam::ParameterMatrix<1>,
gtsam::ParameterMatrix<2>,
gtsam::ParameterMatrix<3>,
gtsam::ParameterMatrix<4>,
gtsam::ParameterMatrix<5>,
gtsam::ParameterMatrix<6>,
gtsam::ParameterMatrix<7>,
gtsam::ParameterMatrix<8>,
gtsam::ParameterMatrix<9>,
gtsam::ParameterMatrix<10>,
gtsam::ParameterMatrix<11>,
gtsam::ParameterMatrix<12>,
gtsam::ParameterMatrix<13>,
gtsam::ParameterMatrix<14>,
gtsam::ParameterMatrix<15>}>
double}>
T at(size_t j);
};

View File

@ -6,15 +6,14 @@ namespace gtsam {
#include <gtsam/sfm/SfmTrack.h>
class SfmTrack2d {
std::vector<pair<size_t, gtsam::Point2>> measurements;
std::vector<gtsam::SfmMeasurement> measurements;
SfmTrack2d();
SfmTrack2d(const std::vector<gtsam::SfmMeasurement>& measurements);
size_t numberMeasurements() const;
pair<size_t, gtsam::Point2> measurement(size_t idx) const;
gtsam::SfmMeasurement measurement(size_t idx) const;
pair<size_t, size_t> siftIndex(size_t idx) const;
void addMeasurement(size_t idx, const gtsam::Point2& m);
gtsam::SfmMeasurement measurement(size_t idx) const;
bool hasUniqueCameras() const;
Eigen::MatrixX2d measurementMatrix() const;
Eigen::VectorXi indexVector() const;
@ -100,6 +99,7 @@ typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3;
typedef gtsam::BinaryMeasurement<gtsam::Rot3> BinaryMeasurementRot3;
typedef gtsam::BinaryMeasurement<gtsam::Point3> BinaryMeasurementPoint3;
// Used in Matlab wrapper
class BinaryMeasurementsUnit3 {
BinaryMeasurementsUnit3();
size_t size() const;
@ -107,6 +107,7 @@ class BinaryMeasurementsUnit3 {
void push_back(const gtsam::BinaryMeasurement<gtsam::Unit3>& measurement);
};
// Used in Matlab wrapper
class BinaryMeasurementsPoint3 {
BinaryMeasurementsPoint3();
size_t size() const;
@ -114,6 +115,7 @@ class BinaryMeasurementsPoint3 {
void push_back(const gtsam::BinaryMeasurement<gtsam::Point3>& measurement);
};
// Used in Matlab wrapper
class BinaryMeasurementsRot3 {
BinaryMeasurementsRot3();
size_t size() const;
@ -121,41 +123,19 @@ class BinaryMeasurementsRot3 {
void push_back(const gtsam::BinaryMeasurement<gtsam::Rot3>& measurement);
};
#include <gtsam/slam/dataset.h>
#include <gtsam/sfm/ShonanAveraging.h>
// TODO(frank): copy/pasta below until we have integer template parameters in
// wrap!
class ShonanAveragingParameters2 {
ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm);
ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm,
template <d={2, 3}>
class ShonanAveragingParameters {
ShonanAveragingParameters(const gtsam::LevenbergMarquardtParams& lm);
ShonanAveragingParameters(const gtsam::LevenbergMarquardtParams& lm,
string method);
gtsam::LevenbergMarquardtParams getLMParams() const;
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot2& value);
pair<size_t, gtsam::Rot2> getAnchor();
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
double getKarcherWeight() const;
void setGaugesWeight(double value);
double getGaugesWeight() const;
void setUseHuber(bool value);
bool getUseHuber() const;
void setCertifyOptimality(bool value);
bool getCertifyOptimality() const;
};
class ShonanAveragingParameters3 {
ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm);
ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm,
string method);
gtsam::LevenbergMarquardtParams getLMParams() const;
void setOptimalityThreshold(double value);
double getOptimalityThreshold() const;
void setAnchor(size_t index, const gtsam::Rot3& value);
pair<size_t, gtsam::Rot3> getAnchor();
void setAnchor(size_t index, const gtsam::This::Rot& value);
pair<size_t, gtsam::This::Rot> getAnchor();
void setAnchorWeight(double value);
double getAnchorWeight() const;
void setKarcherWeight(double value);
@ -168,6 +148,7 @@ class ShonanAveragingParameters3 {
bool getCertifyOptimality() const;
};
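For reference, the templated declaration still surfaces as the familiar per-dimension classes on the Python side (matching the Shonan tests later in this commit); a small, hedged sketch:

import gtsam
from gtsam import ShonanAveragingParameters2, ShonanAveragingParameters3

lm = gtsam.LevenbergMarquardtParams.CeresDefaults()
params3 = ShonanAveragingParameters3(lm)
params3.setUseHuber(False)

params2 = ShonanAveragingParameters2(lm)
params2.setAnchorWeight(100.0)  # illustrative value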
// NOTE(Varun): Not templated because each class has specializations defined.
class ShonanAveraging2 {
ShonanAveraging2(string g2oFile);
ShonanAveraging2(string g2oFile,
@ -214,18 +195,16 @@ class ShonanAveraging2 {
};
class ShonanAveraging3 {
ShonanAveraging3(
const std::vector<gtsam::BinaryMeasurement<gtsam::Rot3>>& measurements,
const gtsam::ShonanAveragingParameters3& parameters =
gtsam::ShonanAveragingParameters3());
ShonanAveraging3(const gtsam::This::Measurements& measurements,
const gtsam::ShonanAveragingParameters3& parameters =
gtsam::ShonanAveragingParameters3());
ShonanAveraging3(string g2oFile);
ShonanAveraging3(string g2oFile,
const gtsam::ShonanAveragingParameters3& parameters);
// TODO(frank): deprecate once we land pybind wrapper
ShonanAveraging3(const gtsam::BetweenFactorPose3s& factors);
ShonanAveraging3(const gtsam::BetweenFactorPose3s& factors,
const gtsam::ShonanAveragingParameters3& parameters);
const gtsam::ShonanAveragingParameters3& parameters =
gtsam::ShonanAveragingParameters3());
// Query properties
size_t nrUnknowns() const;
@ -267,6 +246,7 @@ class ShonanAveraging3 {
#include <gtsam/sfm/MFAS.h>
// Used in Matlab wrapper
class KeyPairDoubleMap {
KeyPairDoubleMap();
KeyPairDoubleMap(const gtsam::KeyPairDoubleMap& other);
@ -310,12 +290,9 @@ class TranslationRecovery {
const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const double scale,
const gtsam::BinaryMeasurementsPoint3& betweenTranslations) const;
// default empty betweenTranslations
gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const double scale) const;
// default scale = 1.0, empty betweenTranslations
gtsam::Values run(
const gtsam::BinaryMeasurementsUnit3& relativeTranslations) const;
gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const double scale = 1.0) const;
};
namespace gtsfm {

View File

@ -251,7 +251,7 @@ void save2D(const gtsam::NonlinearFactorGraph& graph,
string filename);
// std::vector<gtsam::BetweenFactor<Pose2>::shared_ptr>
// Ignored by pybind -> will be List[BetweenFactorPose2]
// Used in Matlab wrapper
class BetweenFactorPose2s {
BetweenFactorPose2s();
size_t size() const;
@ -261,13 +261,14 @@ class BetweenFactorPose2s {
gtsam::BetweenFactorPose2s parse2DFactors(string filename);
// std::vector<gtsam::BetweenFactor<Pose3>::shared_ptr>
// Ignored by pybind -> will be List[BetweenFactorPose3]
// Used in Matlab wrapper
class BetweenFactorPose3s {
BetweenFactorPose3s();
size_t size() const;
gtsam::BetweenFactor<gtsam::Pose3>* at(size_t i) const;
void push_back(const gtsam::BetweenFactor<gtsam::Pose3>* factor);
};
gtsam::BetweenFactorPose3s parse3DFactors(string filename);
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load3D(string filename);

View File

@ -48,10 +48,12 @@ set(ignore
gtsam::ISAM2ThresholdMapValue
gtsam::FactorIndices
gtsam::FactorIndexSet
gtsam::IndexPairSet
gtsam::IndexPairSetMap
gtsam::IndexPairVector
gtsam::BetweenFactorPose2s
gtsam::BetweenFactorPose3s
gtsam::FixedLagSmootherKeyTimestampMap
gtsam::FixedLagSmootherKeyTimestampMapValue
gtsam::Point2Vector
gtsam::Point2Pairs

View File

@ -4,9 +4,49 @@
import sys
from gtsam.utils import findExampleDataFile
from gtsam import gtsam, utils
from gtsam.gtsam import *
from gtsam.utils import findExampleDataFile
#### Typedefs to allow for backwards compatibility
#TODO(Varun) deprecate in future release
# gtsam
KeyVector = list
# base
IndexPairSetMap = dict
IndexPairVector = list
# geometry
Point2Vector = list
Pose3Vector = list
Rot3Vector = list
Point2Pairs = list
Point3Pairs = list
Pose2Pairs = list
Pose3Pairs = list
# sfm
BinaryMeasurementsPoint3 = list
BinaryMeasurementsUnit3 = list
BinaryMeasurementsRot3 = list
KeyPairDoubleMap = dict
SfmTrack2dVector = list
SfmTracks = list
SfmCameras = list
SfmMeasurementVector = list
MatchIndicesMap = dict
KeypointsVector = list
# slam
BetweenFactorPose3s = list
BetweenFactorPose2s = list
class FixedLagSmootherKeyTimestampMap(dict):
"""Class to provide backwards compatibility"""
def insert(self, key_value):
self[key_value[0]] = key_value[1]
#### End typedefs
def _init():
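A brief sketch of what the aliases above buy: code written against the old wrapped container types keeps running, but now receives native Python objects.

import gtsam

poses = gtsam.Pose3Vector([gtsam.Pose3(), gtsam.Pose3()])  # now a plain list
assert isinstance(poses, list)

timestamps = gtsam.FixedLagSmootherKeyTimestampMap()  # dict subclass
timestamps.insert((0, 0.0))  # old-style insert still works
timestamps[1] = 0.25         # ...as does plain dict access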

View File

@ -16,6 +16,7 @@ import numpy as np
import gtsam
import gtsam_unstable
def BatchFixedLagSmootherExample():
"""
Runs a batch fixed smoother on an agent with two odometry
@ -31,7 +32,7 @@ def BatchFixedLagSmootherExample():
# that will be sent to the smoothers
new_factors = gtsam.NonlinearFactorGraph()
new_values = gtsam.Values()
new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap()
new_timestamps = {}
# Create a prior on the first pose, placing it at the origin
prior_mean = gtsam.Pose2(0, 0, 0)
@ -39,7 +40,7 @@ def BatchFixedLagSmootherExample():
X1 = 0
new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise))
new_values.insert(X1, prior_mean)
new_timestamps.insert((X1, 0.0))
new_timestamps[X1] = 0.0
delta_time = 0.25
time = 0.25
@ -49,7 +50,7 @@ def BatchFixedLagSmootherExample():
current_key = int(1000 * time)
# assign current key to the current timestamp
new_timestamps.insert((current_key, time))
new_timestamps[current_key] = time
# Add a guess for this pose to the new values
# Assume that the robot moves at 2 m/s. Position is time[s] * 2[m/s]

View File

@ -17,9 +17,8 @@ Date: September 2020
from collections import defaultdict
from typing import List, Tuple
import numpy as np
import gtsam
import numpy as np
from gtsam.examples import SFMdata
# Hyperparameters for 1dsfm, values used from Kyle Wilson's code.
@ -59,7 +58,7 @@ def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]:
return wRc_values, i_iZj_list
def filter_outliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMeasurementsUnit3:
def filter_outliers(w_iZj_list: List[gtsam.BinaryMeasurementUnit3]) -> List[gtsam.BinaryMeasurementUnit3]:
"""Removes outliers from a list of Unit3 measurements that are the
translation directions from camera i to camera j in the world frame."""
@ -89,14 +88,14 @@ def filter_outliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMe
avg_outlier_weights[keypair] += weight / len(outlier_weights)
# Remove w_iZj that have weight greater than threshold, these are outliers.
w_iZj_inliers = gtsam.BinaryMeasurementsUnit3()
w_iZj_inliers = []
[w_iZj_inliers.append(w_iZj) for w_iZj in w_iZj_list if avg_outlier_weights[(
w_iZj.key1(), w_iZj.key2())] < OUTLIER_WEIGHT_THRESHOLD]
return w_iZj_inliers
def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
def estimate_poses(i_iZj_list: List[gtsam.BinaryMeasurementUnit3],
wRc_values: gtsam.Values) -> gtsam.Values:
"""Estimate poses given rotations and normalized translation directions between cameras.
@ -112,7 +111,7 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
"""
# Convert the translation direction measurements to world frame using the rotations.
w_iZj_list = gtsam.BinaryMeasurementsUnit3()
w_iZj_list = []
for i_iZj in i_iZj_list:
w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1())
.rotate(i_iZj.measured().point3()))

View File

@ -11,6 +11,7 @@
#include <pybind11/eigen.h>
#include <pybind11/stl_bind.h>
#include <pybind11/stl.h>
#include <pybind11/pybind11.h>
#include <pybind11/operators.h>
#include <pybind11/functional.h>

View File

@ -10,9 +10,3 @@
* Without this they will be automatically converted to a Python object, and all
* mutations on Python side will not be reflected on C++.
*/
PYBIND11_MAKE_OPAQUE(gtsam::IndexPairVector);
PYBIND11_MAKE_OPAQUE(gtsam::IndexPairSet);
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Matrix>); // JacobianVector

View File

@ -10,5 +10,3 @@
* Without this they will be automatically converted to a Python object, and all
* mutations on Python side will not be reflected on C++.
*/
#include <pybind11/stl.h>

View File

@ -10,3 +10,6 @@
* Without this they will be automatically converted to a Python object, and all
* mutations on Python side will not be reflected on C++.
*/
// Added so that CustomFactor can pass the JacobianVector to the C++ side
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Matrix>); // JacobianVector

View File

@ -11,5 +11,4 @@
* mutations on Python side will not be reflected on C++.
*/
#include <pybind11/stl.h>
PYBIND11_MAKE_OPAQUE(gtsam::DiscreteKeys);

View File

@ -10,14 +10,8 @@
* Without this they will be automatically converted to a Python object, and all
* mutations on Python side will not be reflected on C++.
*/
#include <pybind11/stl.h>
PYBIND11_MAKE_OPAQUE(
std::vector<gtsam::Point2, Eigen::aligned_allocator<gtsam::Point2>>);
PYBIND11_MAKE_OPAQUE(gtsam::Point2Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Point3Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Pose2Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Pose3Pairs);
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Pose3>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Unified>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Fisheye>>);

View File

@ -10,8 +10,3 @@
* Without this they will be automatically converted to a Python object, and all
* mutations on Python side will not be reflected on C++.
*/
#ifdef GTSAM_ALLOCATOR_TBB
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Key, tbb::tbb_allocator<gtsam::Key>>);
#else
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Key>);
#endif

View File

@ -10,11 +10,3 @@
* Without this they will be automatically converted to a Python object, and all
* mutations on Python side will not be reflected on C++.
*/
#include <pybind11/stl.h>
// NOTE: Needed since we are including pybind11/stl.h.
#ifdef GTSAM_ALLOCATOR_TBB
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Key, tbb::tbb_allocator<gtsam::Key>>);
#else
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Key>);
#endif

View File

@ -11,17 +11,3 @@
* mutations on Python side will not be reflected on C++.
*/
// Including <stl.h> can cause some serious hard-to-debug bugs!!!
// #include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::SfmMeasurement>);
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::SfmTrack>);
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::SfmCamera>);
PYBIND11_MAKE_OPAQUE(
std::vector<gtsam::BinaryMeasurement<gtsam::Unit3>>);
PYBIND11_MAKE_OPAQUE(
std::vector<gtsam::BinaryMeasurement<gtsam::Rot3>>);
PYBIND11_MAKE_OPAQUE(
std::vector<gtsam::gtsfm::Keypoints>);
PYBIND11_MAKE_OPAQUE(gtsam::gtsfm::MatchIndicesMap);

View File

@ -10,9 +10,3 @@
* Without this they will be automatically converted to a Python object, and all
* mutations on Python side will not be reflected on C++.
*/
PYBIND11_MAKE_OPAQUE(
std::vector<std::shared_ptr<gtsam::BetweenFactor<gtsam::Pose3> > >);
PYBIND11_MAKE_OPAQUE(
std::vector<std::shared_ptr<gtsam::BetweenFactor<gtsam::Pose2> > >);
PYBIND11_MAKE_OPAQUE(gtsam::Rot3Vector);

View File

@ -11,7 +11,3 @@
* and saves one copy operation.
*/
py::bind_map<gtsam::IndexPairSetMap>(m_, "IndexPairSetMap");
py::bind_vector<gtsam::IndexPairVector>(m_, "IndexPairVector");
py::bind_vector<std::vector<gtsam::Matrix> >(m_, "JacobianVector");

View File

@ -9,4 +9,7 @@
* interface, but without the `<pybind11/stl.h>` copying mechanism. Combined
* with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
* and saves one copy operation.
*/
*/
// Added so that CustomFactor can pass the JacobianVector to the C++ side
py::bind_vector<std::vector<gtsam::Matrix> >(m_, "JacobianVector");

View File

@ -11,14 +11,6 @@
* and saves one copy operation.
*/
py::bind_vector<
std::vector<gtsam::Point2, Eigen::aligned_allocator<gtsam::Point2>>>(
m_, "Point2Vector");
py::bind_vector<std::vector<gtsam::Point2Pair>>(m_, "Point2Pairs");
py::bind_vector<std::vector<gtsam::Point3Pair>>(m_, "Point3Pairs");
py::bind_vector<std::vector<gtsam::Pose2Pair>>(m_, "Pose2Pairs");
py::bind_vector<std::vector<gtsam::Pose3Pair>>(m_, "Pose3Pairs");
py::bind_vector<std::vector<gtsam::Pose3>>(m_, "Pose3Vector");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>>(
m_, "CameraSetCal3_S2");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3DS2>>>(

View File

@ -10,11 +10,3 @@
* with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
* and saves one copy operation.
*/
#ifdef GTSAM_ALLOCATOR_TBB
py::bind_vector<std::vector<gtsam::Key, tbb::tbb_allocator<gtsam::Key> > >(m_, "KeyVector");
py::implicitly_convertible<py::list, std::vector<gtsam::Key, tbb::tbb_allocator<gtsam::Key> > >();
#else
py::bind_vector<std::vector<gtsam::Key> >(m_, "KeyVector");
py::implicitly_convertible<py::list, std::vector<gtsam::Key> >();
#endif

View File

@ -10,5 +10,3 @@
* with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
* and saves one copy operation.
*/
py::bind_map<std::map<char, double>>(m_, "__MapCharDouble");

View File

@ -11,18 +11,3 @@
* and saves one copy operation.
*/
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Point3> > >(
m_, "BinaryMeasurementsPoint3");
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Unit3> > >(
m_, "BinaryMeasurementsUnit3");
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Rot3> > >(
m_, "BinaryMeasurementsRot3");
py::bind_map<gtsam::KeyPairDoubleMap>(m_, "KeyPairDoubleMap");
py::bind_vector<std::vector<gtsam::SfmTrack2d>>(m_, "SfmTrack2dVector");
py::bind_vector<std::vector<gtsam::SfmTrack>>(m_, "SfmTracks");
py::bind_vector<std::vector<gtsam::SfmCamera>>(m_, "SfmCameras");
py::bind_vector<std::vector<std::pair<size_t, gtsam::Point2>>>(
m_, "SfmMeasurementVector");
py::bind_map<gtsam::gtsfm::MatchIndicesMap>(m_, "MatchIndicesMap");
py::bind_vector<std::vector<gtsam::gtsfm::Keypoints>>(m_, "KeypointsVector");

View File

@ -10,11 +10,3 @@
* with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
* and saves one copy operation.
*/
py::bind_vector<
std::vector<std::shared_ptr<gtsam::BetweenFactor<gtsam::Pose3>>>>(
m_, "BetweenFactorPose3s");
py::bind_vector<
std::vector<std::shared_ptr<gtsam::BetweenFactor<gtsam::Pose2>>>>(
m_, "BetweenFactorPose2s");
py::bind_vector<gtsam::Rot3Vector>(m_, "Rot3Vector");

View File

@ -1,4 +1,4 @@
# This trick is to allow direct import of sub-modules
# without this, we can only do `from gtsam.gtsam.symbol_shorthand import X`
# with this trick, we can do `from gtsam.symbol_shorthand import X`
from .gtsam.symbol_shorthand import *
from .gtsam.symbol_shorthand import *

View File

@ -12,10 +12,10 @@ Refactored: Roderick Koehle
import unittest
import numpy as np
from gtsam.symbol_shorthand import K, L, P
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam.utils.test_case import GtsamTestCase
from gtsam.symbol_shorthand import K, L, P
def ulp(ftype=np.float64):
@ -27,7 +27,7 @@ def ulp(ftype=np.float64):
class TestCal3Fisheye(GtsamTestCase):
@classmethod
def setUpClass(cls):
"""
@ -51,10 +51,10 @@ class TestCal3Fisheye(GtsamTestCase):
camera1 = gtsam.PinholeCameraCal3Fisheye(pose1)
camera2 = gtsam.PinholeCameraCal3Fisheye(pose2)
cls.origin = np.array([0.0, 0.0, 0.0])
cls.poses = gtsam.Pose3Vector([pose1, pose2])
cls.cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2])
cls.measurements = gtsam.Point2Vector([k.project(cls.origin) for k in cls.cameras])
cls.poses = [pose1, pose2]
cls.cameras = [camera1, camera2]
cls.measurements = [k.project(cls.origin) for k in cls.cameras]
def test_Cal3Fisheye(self):
K = gtsam.Cal3Fisheye()
self.assertEqual(K.fx(), 1.)
@ -63,7 +63,7 @@ class TestCal3Fisheye(GtsamTestCase):
def test_distortion(self):
"""Fisheye distortion and rectification"""
equidistant = gtsam.Cal3Fisheye()
perspective_pt = self.obj_point[0:2]/self.obj_point[2]
perspective_pt = self.obj_point[0:2] / self.obj_point[2]
distorted_pt = equidistant.uncalibrate(perspective_pt)
rectified_pt = equidistant.calibrate(distorted_pt)
self.gtsamAssertEquals(distorted_pt, self.img_point)
@ -167,7 +167,7 @@ class TestCal3Fisheye(GtsamTestCase):
pose = gtsam.Pose3()
camera = gtsam.Cal3Fisheye()
state = gtsam.Values()
camera_key, pose_key, landmark_key = K(0), P(0), L(0)
pose_key, landmark_key = P(0), L(0)
state.insert_point3(landmark_key, obj_point)
state.insert_pose3(pose_key, pose)
g = gtsam.NonlinearFactorGraph()
@ -187,7 +187,7 @@ class TestCal3Fisheye(GtsamTestCase):
def test_triangulation_rectify(self):
"""Estimate spatial point from image measurements using rectification"""
rectified = gtsam.Point2Vector([k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)])
rectified = [k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)]
shared_cal = gtsam.Cal3_S2()
triangulated = gtsam.triangulatePoint3(self.poses, shared_cal, rectified, rank_tol=1e-9, optimize=False)
self.gtsamAssertEquals(triangulated, self.origin)

View File

@ -11,10 +11,10 @@ Author: Frank Dellaert & Duy Nguyen Ta (Python)
import unittest
import numpy as np
from gtsam.symbol_shorthand import K, L, P
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam.utils.test_case import GtsamTestCase
from gtsam.symbol_shorthand import K, L, P
class TestCal3Unified(GtsamTestCase):
@ -48,9 +48,9 @@ class TestCal3Unified(GtsamTestCase):
camera1 = gtsam.PinholeCameraCal3Unified(pose1, cls.stereographic)
camera2 = gtsam.PinholeCameraCal3Unified(pose2, cls.stereographic)
cls.origin = np.array([0.0, 0.0, 0.0])
cls.poses = gtsam.Pose3Vector([pose1, pose2])
cls.cameras = gtsam.CameraSetCal3Unified([camera1, camera2])
cls.measurements = gtsam.Point2Vector([k.project(cls.origin) for k in cls.cameras])
cls.poses = [pose1, pose2]
cls.cameras = [camera1, camera2]
cls.measurements = [k.project(cls.origin) for k in cls.cameras]
def test_Cal3Unified(self):
K = gtsam.Cal3Unified()
@ -107,7 +107,7 @@ class TestCal3Unified(GtsamTestCase):
state = gtsam.Values()
measured = self.img_point
noise_model = gtsam.noiseModel.Isotropic.Sigma(2, 1)
camera_key, pose_key, landmark_key = K(0), P(0), L(0)
camera_key, pose_key, landmark_key = K(0), P(0), L(0)
k = self.stereographic
state.insert_cal3unified(camera_key, k)
state.insert_pose3(pose_key, gtsam.Pose3())
@ -142,7 +142,7 @@ class TestCal3Unified(GtsamTestCase):
Dcal = np.zeros((2, 10), order='F')
Dp = np.zeros((2, 2), order='F')
camera.calibrate(img_point, Dcal, Dp)
self.gtsamAssertEquals(Dcal, np.array(
[[ 0., 0., 0., -1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., -1., 0., 0., 0., 0., 0.]]))
@ -158,7 +158,7 @@ class TestCal3Unified(GtsamTestCase):
def test_triangulation_rectify(self):
"""Estimate spatial point from image measurements using rectification"""
rectified = gtsam.Point2Vector([k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)])
rectified = [k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)]
shared_cal = gtsam.Cal3_S2()
triangulated = gtsam.triangulatePoint3(self.poses, shared_cal, rectified, rank_tol=1e-9, optimize=False)
self.gtsamAssertEquals(triangulated, self.origin)

View File

@ -13,7 +13,8 @@ Author: Frank Dellaert
import unittest
from gtsam import DecisionTreeFactor, DiscreteValues, DiscreteDistribution, Ordering
from gtsam import (DecisionTreeFactor, DiscreteDistribution, DiscreteValues,
Ordering)
from gtsam.utils.test_case import GtsamTestCase

View File

@ -5,13 +5,13 @@ Authors: John Lambert
import unittest
import gtsam
import numpy as np
from gtsam import (IndexPair, KeypointsVector, MatchIndicesMap, Point2,
SfmMeasurementVector, SfmTrack2d)
from gtsam.gtsfm import Keypoints
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam import IndexPair, Point2, SfmTrack2d
class TestDsfTrackGenerator(GtsamTestCase):
"""Tests for DsfTrackGenerator."""
@ -23,14 +23,14 @@ class TestDsfTrackGenerator(GtsamTestCase):
kps_i1 = Keypoints(np.array([[50.0, 60], [70, 80], [90, 100]]))
kps_i2 = Keypoints(np.array([[110.0, 120], [130, 140]]))
keypoints_list = KeypointsVector()
keypoints_list = []
keypoints_list.append(kps_i0)
keypoints_list.append(kps_i1)
keypoints_list.append(kps_i2)
# For each image pair (i1,i2), we provide a (K,2) matrix
# of corresponding image indices (k1,k2).
matches_dict = MatchIndicesMap()
matches_dict = {}
matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]])
matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]])
@ -86,7 +86,7 @@ class TestSfmTrack2d(GtsamTestCase):
def test_sfm_track_2d_constructor(self) -> None:
"""Test construction of 2D SfM track."""
measurements = SfmMeasurementVector()
measurements = []
measurements.append((0, Point2(10, 20)))
track = SfmTrack2d(measurements=measurements)
track.measurement(0)

View File

@ -11,10 +11,10 @@ Author: Frank Dellaert & Duy Nguyen Ta (Python)
import unittest
import numpy as np
from gtsam.utils.test_case import GtsamTestCase
import gtsam
import gtsam_unstable
from gtsam.utils.test_case import GtsamTestCase
class TestFixedLagSmootherExample(GtsamTestCase):
'''
@ -36,7 +36,7 @@ class TestFixedLagSmootherExample(GtsamTestCase):
# that will be sent to the smoothers
new_factors = gtsam.NonlinearFactorGraph()
new_values = gtsam.Values()
new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap()
new_timestamps = {}
# Create a prior on the first pose, placing it at the origin
prior_mean = gtsam.Pose2(0, 0, 0)
@ -47,7 +47,7 @@ class TestFixedLagSmootherExample(GtsamTestCase):
gtsam.PriorFactorPose2(
X1, prior_mean, prior_noise))
new_values.insert(X1, prior_mean)
new_timestamps.insert((X1, 0.0))
new_timestamps[X1] = 0.0
delta_time = 0.25
time = 0.25
@ -77,7 +77,7 @@ class TestFixedLagSmootherExample(GtsamTestCase):
current_key = int(1000 * time)
# assign current key to the current timestamp
new_timestamps.insert((current_key, time))
new_timestamps[current_key] = time
# Add a guess for this pose to the new values
# Assume that the robot moves at 2 m/s. Position is time[s] *

View File

@ -14,11 +14,12 @@ from __future__ import print_function
import unittest
import gtsam
import numpy as np
from gtsam.symbol_shorthand import X
from gtsam.utils.test_case import GtsamTestCase
import gtsam
def create_graph():
"""Create a basic linear factor graph for testing"""
@ -40,6 +41,7 @@ def create_graph():
class TestGaussianFactorGraph(GtsamTestCase):
"""Tests for Gaussian Factor Graphs."""
def test_fg(self):
"""Test solving a linear factor graph"""
graph, X = create_graph()
@ -98,12 +100,11 @@ class TestGaussianFactorGraph(GtsamTestCase):
bn = gfg.eliminateSequential(ordering)
self.assertEqual(bn.size(), 3)
keyVector = gtsam.KeyVector()
keyVector.append(keys[2])
#TODO(Varun) Below code causes segfault in Debug config
# ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(gfg, keyVector)
# bn = gfg.eliminateSequential(ordering)
# self.assertEqual(bn.size(), 3)
keyVector = [keys[2]]
ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(
gfg, keyVector)
bn = gfg.eliminateSequential(ordering)
self.assertEqual(bn.size(), 3)
if __name__ == '__main__':

View File

@ -17,8 +17,9 @@ import numpy as np
from gtsam.symbol_shorthand import A, X
from gtsam.utils.test_case import GtsamTestCase
from gtsam import (DiscreteConditional, DiscreteKeys, DiscreteValues, GaussianConditional,
GaussianMixture, HybridBayesNet, HybridValues, noiseModel, VectorValues)
from gtsam import (DiscreteConditional, DiscreteKeys, DiscreteValues,
GaussianConditional, GaussianMixture, HybridBayesNet,
HybridValues, VectorValues, noiseModel)
class TestHybridBayesNet(GtsamTestCase):
@ -31,8 +32,8 @@ class TestHybridBayesNet(GtsamTestCase):
# Create the continuous conditional
I_1x1 = np.eye(1)
conditional = GaussianConditional.FromMeanAndStddev(X(0), 2 * I_1x1, X(1), [-4],
5.0)
conditional = GaussianConditional.FromMeanAndStddev(
X(0), 2 * I_1x1, X(1), [-4], 5.0)
# Create the noise models
model0 = noiseModel.Diagonal.Sigmas([2.0])
@ -47,8 +48,9 @@ class TestHybridBayesNet(GtsamTestCase):
# Create hybrid Bayes net.
bayesNet = HybridBayesNet()
bayesNet.push_back(conditional)
bayesNet.push_back(GaussianMixture(
[X(1)], [], discrete_keys, [conditional0, conditional1]))
bayesNet.push_back(
GaussianMixture([X(1)], [], discrete_keys,
[conditional0, conditional1]))
bayesNet.push_back(DiscreteConditional(Asia, "99/1"))
# Create values at which to evaluate.
@ -76,7 +78,7 @@ class TestHybridBayesNet(GtsamTestCase):
self.check_invariance(bayesNet.at(0).asGaussian(), continuous)
self.check_invariance(bayesNet.at(0).asGaussian(), values)
self.check_invariance(bayesNet.at(0), values)
self.check_invariance(bayesNet.at(1), values)
self.check_invariance(bayesNet.at(2).asDiscrete(), discrete)
@ -89,7 +91,8 @@ class TestHybridBayesNet(GtsamTestCase):
self.assertTrue(probability >= 0.0)
logProb = conditional.logProbability(values)
self.assertAlmostEqual(probability, np.exp(logProb))
expected = conditional.logNormalizationConstant() - conditional.error(values)
expected = conditional.logNormalizationConstant() - \
conditional.error(values)
self.assertAlmostEqual(logProb, expected)

View File

@ -13,15 +13,15 @@ Author: Frank Dellaert
import unittest
import gtsam
import numpy as np
from gtsam import Rot3
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam import Rot3
KEY = 0
MODEL = gtsam.noiseModel.Unit.Create(3)
# Rot3 version
R = Rot3.Expmap(np.array([0.1, 0, 0]))
@ -29,9 +29,11 @@ R = Rot3.Expmap(np.array([0.1, 0, 0]))
class TestKarcherMean(GtsamTestCase):
def test_find(self):
# Check that optimizing for Karcher mean (which minimizes Between distance)
# gets correct result.
rotations = gtsam.Rot3Vector([R, R.inverse()])
"""
Check that optimizing for Karcher mean (which minimizes Between distance)
gets correct result.
"""
rotations = [R, R.inverse()]
expected = Rot3()
actual = gtsam.FindKarcherMean(rotations)
self.gtsamAssertEquals(expected, actual)
@ -42,7 +44,7 @@ class TestKarcherMean(GtsamTestCase):
a2Rb2 = Rot3()
a3Rb3 = Rot3()
aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3])
aRb_list = [a1Rb1, a2Rb2, a3Rb3]
aRb_expected = Rot3()
aRb = gtsam.FindKarcherMean(aRb_list)
@ -58,9 +60,7 @@ class TestKarcherMean(GtsamTestCase):
graph = gtsam.NonlinearFactorGraph()
R12 = R.compose(R.compose(R))
graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL))
keys = gtsam.KeyVector()
keys.append(1)
keys.append(2)
keys = [1, 2]
graph.add(gtsam.KarcherMeanFactorRot3(keys))
initial = gtsam.Values()
@ -69,11 +69,9 @@ class TestKarcherMean(GtsamTestCase):
expected = Rot3()
result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
actual = gtsam.FindKarcherMean(
gtsam.Rot3Vector([result.atRot3(1), result.atRot3(2)]))
actual = gtsam.FindKarcherMean([result.atRot3(1), result.atRot3(2)])
self.gtsamAssertEquals(expected, actual)
self.gtsamAssertEquals(
R12, result.atRot3(1).between(result.atRot3(2)))
self.gtsamAssertEquals(R12, result.atRot3(1).between(result.atRot3(2)))
if __name__ == "__main__":

View File

@ -11,14 +11,15 @@ Author: Frank Dellaert & Duy Nguyen Ta & John Lambert
import math
import unittest
import gtsam
import numpy as np
from gtsam import Point2, Point2Pairs, Pose2
from gtsam.utils.test_case import GtsamTestCase
from gtsam import Point2, Point2Pairs, Pose2
class TestPose2(GtsamTestCase):
"""Test selected Pose2 methods."""
def test_adjoint(self) -> None:
"""Test adjoint method."""
xi = np.array([1, 2, 3])
@ -28,7 +29,7 @@ class TestPose2(GtsamTestCase):
def test_transformTo(self):
"""Test transformTo method."""
pose = Pose2(2, 4, -math.pi/2)
pose = Pose2(2, 4, -math.pi / 2)
actual = pose.transformTo(Point2(3, 2))
expected = Point2(2, 1)
self.gtsamAssertEquals(actual, expected, 1e-6)
@ -42,7 +43,7 @@ class TestPose2(GtsamTestCase):
def test_transformFrom(self):
"""Test transformFrom method."""
pose = Pose2(2, 4, -math.pi/2)
pose = Pose2(2, 4, -math.pi / 2)
actual = pose.transformFrom(Point2(2, 1))
expected = Point2(3, 2)
self.gtsamAssertEquals(actual, expected, 1e-6)
@ -83,7 +84,7 @@ class TestPose2(GtsamTestCase):
]
# fmt: on
ab_pairs = Point2Pairs(list(zip(pts_a, pts_b)))
ab_pairs = list(zip(pts_a, pts_b))
aTb = Pose2.Align(ab_pairs)
self.assertIsNotNone(aTb)

View File

@ -13,10 +13,10 @@ import math
import unittest
import numpy as np
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam import Point3, Pose3, Rot3, Point3Pairs
from gtsam.utils.test_case import GtsamTestCase
from gtsam import Point3, Pose3, Rot3
def numerical_derivative_pose(pose, method, delta=1e-5):
@ -223,7 +223,7 @@ class TestPose3(GtsamTestCase):
sTt = Pose3(Rot3.Rodrigues(0, 0, -math.pi), Point3(2, 4, 0))
transformed = sTt.transformTo(square)
st_pairs = Point3Pairs()
st_pairs = []
for j in range(4):
st_pairs.append((square[:,j], transformed[:,j]))
@ -237,4 +237,4 @@ class TestPose3(GtsamTestCase):
if __name__ == "__main__":
unittest.main()
unittest.main()

View File

@ -12,20 +12,14 @@ Author: Frank Dellaert
import unittest
import gtsam
import numpy as np
from gtsam import (
BetweenFactorPose2,
LevenbergMarquardtParams,
Rot2,
Pose2,
ShonanAveraging2,
ShonanAveragingParameters2,
ShonanAveraging3,
ShonanAveragingParameters3,
)
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam import (BetweenFactorPose2, LevenbergMarquardtParams, Pose2, Rot2,
ShonanAveraging2, ShonanAveraging3,
ShonanAveragingParameters2, ShonanAveragingParameters3)
DEFAULT_PARAMS = ShonanAveragingParameters3(
gtsam.LevenbergMarquardtParams.CeresDefaults()
)
@ -146,7 +140,6 @@ class TestShonanAveraging(GtsamTestCase):
self.assertAlmostEqual(3.0756, shonan.cost(initial), places=3)
result, _lambdaMin = shonan.run(initial, 3, 3)
self.assertAlmostEqual(0.0015, shonan.cost(result), places=3)
def test_constructorBetweenFactorPose2s(self) -> None:
"""Check if ShonanAveraging2 constructor works when not initialized from g2o file.
@ -183,7 +176,7 @@ class TestShonanAveraging(GtsamTestCase):
shonan_params.setCertifyOptimality(True)
noise_model = gtsam.noiseModel.Unit.Create(3)
between_factors = gtsam.BetweenFactorPose2s()
between_factors = []
for (i1, i2), i2Ri1 in i2Ri1_dict.items():
i2Ti1 = Pose2(i2Ri1, np.zeros(2))
between_factors.append(
@ -196,11 +189,11 @@ class TestShonanAveraging(GtsamTestCase):
wRi_list = [result_values.atRot2(i) for i in range(num_images)]
thetas_deg = np.array([wRi.degrees() for wRi in wRi_list])
# map all angles to [0,360)
thetas_deg = thetas_deg % 360
thetas_deg -= thetas_deg[0]
expected_thetas_deg = np.array([0.0, 90.0, 0.0])
np.testing.assert_allclose(thetas_deg, expected_thetas_deg, atol=0.1)

View File

@ -12,15 +12,16 @@ Author: John Lambert
import unittest
import numpy as np
from gtsam import Pose2, Pose2Pairs, Rot2, Similarity2
from gtsam.utils.test_case import GtsamTestCase
from gtsam import Pose2, Rot2, Similarity2
class TestSim2(GtsamTestCase):
"""Test selected Sim2 methods."""
def test_align_poses_along_straight_line(self) -> None:
"""Test Align Pose2Pairs method.
"""Test Align of list of Pose2Pair.
Scenario:
3 object poses
@ -46,7 +47,7 @@ class TestSim2(GtsamTestCase):
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
we_pairs = list(zip(wToi_list, eToi_list))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity2.Align(we_pairs)
@ -55,7 +56,7 @@ class TestSim2(GtsamTestCase):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses_along_straight_line_gauge(self):
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
"""Test if Pose2 Align method can account for gauge ambiguity.
Scenario:
3 object poses
@ -81,7 +82,7 @@ class TestSim2(GtsamTestCase):
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
we_pairs = list(zip(wToi_list, eToi_list))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity2.Align(we_pairs)
@ -90,7 +91,7 @@ class TestSim2(GtsamTestCase):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses_scaled_squares(self):
"""Test if Align Pose2Pairs method can account for gauge ambiguity.
"""Test if Align method can account for gauge ambiguity.
Make sure a big and small square can be aligned.
The u's represent a big square (10x10), and the v's represent a small square (4x4).
@ -119,7 +120,7 @@ class TestSim2(GtsamTestCase):
bTi_list = [bTi0, bTi1, bTi2, bTi3]
ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list)))
ab_pairs = list(zip(aTi_list, bTi_list))
# Recover the transformation wSe (i.e. world_S_egovehicle)
aSb = Similarity2.Align(ab_pairs)

View File

@ -13,17 +13,17 @@ import unittest
from typing import List, Optional
import numpy as np
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam import Point3, Pose3, Pose3Pairs, Rot3, Similarity3
from gtsam.utils.test_case import GtsamTestCase
from gtsam import Point3, Pose3, Rot3, Similarity3
class TestSim3(GtsamTestCase):
"""Test selected Sim3 methods."""
def test_align_poses_along_straight_line(self):
"""Test Align Pose3Pairs method.
"""Test Pose3 Align method.
Scenario:
3 object poses
@ -49,7 +49,7 @@ class TestSim3(GtsamTestCase):
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose3Pairs(list(zip(wToi_list, eToi_list)))
we_pairs = list(zip(wToi_list, eToi_list))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity3.Align(we_pairs)
@ -58,7 +58,7 @@ class TestSim3(GtsamTestCase):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses_along_straight_line_gauge(self):
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
"""Test if Pose3 Align method can account for gauge ambiguity.
Scenario:
3 object poses
@ -84,7 +84,7 @@ class TestSim3(GtsamTestCase):
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose3Pairs(list(zip(wToi_list, eToi_list)))
we_pairs = list(zip(wToi_list, eToi_list))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity3.Align(we_pairs)
@ -93,7 +93,7 @@ class TestSim3(GtsamTestCase):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses_scaled_squares(self):
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
"""Test if Pose3 Align method can account for gauge ambiguity.
Make sure a big and small square can be aligned.
The u's represent a big square (10x10), and the v's represent a small square (4x4).
@ -122,7 +122,7 @@ class TestSim3(GtsamTestCase):
bTi_list = [bTi0, bTi1, bTi2, bTi3]
ab_pairs = Pose3Pairs(list(zip(aTi_list, bTi_list)))
ab_pairs = list(zip(aTi_list, bTi_list))
# Recover the transformation wSe (i.e. world_S_egovehicle)
aSb = Similarity3.Align(ab_pairs)
@ -689,7 +689,7 @@ def align_poses_sim3(aTi_list: List[Pose3], bTi_list: List[Pose3]) -> Similarity
assert len(aTi_list) == len(bTi_list)
assert n_to_align >= 2, "SIM(3) alignment uses at least 2 frames"
ab_pairs = Pose3Pairs(list(zip(aTi_list, bTi_list)))
ab_pairs = list(zip(aTi_list, bTi_list))
aSb = Similarity3.Align(ab_pairs)

View File

@ -1,25 +1,25 @@
from __future__ import print_function
import numpy as np
import unittest
import gtsam
import numpy as np
""" Returns example pose values of 3 points A, B and C in the world frame """
def ExampleValues():
""" Returns example pose values of 3 points A, B and C in the world frame """
T = []
T.append(gtsam.Point3(np.array([3.14, 1.59, 2.65])))
T.append(gtsam.Point3(np.array([-1.0590e+00, -3.6017e-02, -1.5720e+00])))
T.append(gtsam.Point3(np.array([8.5034e+00, 6.7499e+00, -3.6383e+00])))
data = gtsam.Values()
for i in range(len(T)):
data.insert(i, gtsam.Pose3(gtsam.Rot3(), T[i]))
for i, t in enumerate(T):
data.insert(i, gtsam.Pose3(gtsam.Rot3(), t))
return data
""" Returns binary measurements for the points in the given edges."""
def SimulateMeasurements(gt_poses, graph_edges):
measurements = gtsam.BinaryMeasurementsUnit3()
""" Returns binary measurements for the points in the given edges."""
measurements = []
for edge in graph_edges:
Ta = gt_poses.atPose3(edge[0]).translation()
Tb = gt_poses.atPose3(edge[1]).translation()
@ -28,18 +28,20 @@ def SimulateMeasurements(gt_poses, graph_edges):
gtsam.noiseModel.Isotropic.Sigma(3, 0.01)))
return measurements
""" Tests for the translation recovery class """
class TestTranslationRecovery(unittest.TestCase):
"""Test selected Translation Recovery methods."""
""" Tests for the translation recovery class."""
def test_constructor(self):
"""Construct from binary measurements."""
algorithm = gtsam.TranslationRecovery()
self.assertIsInstance(algorithm, gtsam.TranslationRecovery)
algorithm_params = gtsam.TranslationRecovery(gtsam.LevenbergMarquardtParams())
algorithm_params = gtsam.TranslationRecovery(
gtsam.LevenbergMarquardtParams())
self.assertIsInstance(algorithm_params, gtsam.TranslationRecovery)
def test_run(self):
"""Test selected Translation Recovery methods."""
gt_poses = ExampleValues()
measurements = SimulateMeasurements(gt_poses, [[0, 1], [0, 2], [1, 2]])
@ -51,15 +53,23 @@ class TestTranslationRecovery(unittest.TestCase):
scale = 2.0
result = algorithm.run(measurements, scale)
w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(0).translation()
w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(0).translation()
w_aTc_expected = w_aTc*scale/np.linalg.norm(w_aTb)
w_aTb_expected = w_aTb*scale/np.linalg.norm(w_aTb)
w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(
0).translation()
w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(
0).translation()
w_aTc_expected = w_aTc * scale / np.linalg.norm(w_aTb)
w_aTb_expected = w_aTb * scale / np.linalg.norm(w_aTb)
np.testing.assert_array_almost_equal(result.atPoint3(0),
np.array([0, 0, 0]), 6,
"Origin result is incorrect.")
np.testing.assert_array_almost_equal(result.atPoint3(1),
w_aTb_expected, 6,
"Point B result is incorrect.")
np.testing.assert_array_almost_equal(result.atPoint3(2),
w_aTc_expected, 6,
"Point C result is incorrect.")
np.testing.assert_array_almost_equal(result.atPoint3(0), np.array([0,0,0]), 6, "Origin result is incorrect.")
np.testing.assert_array_almost_equal(result.atPoint3(1), w_aTb_expected, 6, "Point B result is incorrect.")
np.testing.assert_array_almost_equal(result.atPoint3(2), w_aTc_expected, 6, "Point C result is incorrect.")
if __name__ == "__main__":
unittest.main()

View File

@ -11,15 +11,15 @@ Authors: Frank Dellaert & Fan Jiang (Python) & Sushmita Warrier & John Lambert
# pylint: disable=no-name-in-module, invalid-name, no-member
import unittest
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2,
CameraSetCal3Bundler, PinholeCameraCal3_S2,
PinholeCameraCal3Bundler, Point2, Point2Vector, Point3,
Pose3, Pose3Vector, Rot3, TriangulationParameters,
TriangulationResult)
from gtsam.utils.test_case import GtsamTestCase
PinholeCameraCal3Bundler, Point2, Point3, Pose3, Rot3,
TriangulationParameters, TriangulationResult)
UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
@ -35,9 +35,7 @@ class TestTriangulationExample(GtsamTestCase):
# create second camera 1 meter to the right of first camera
pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))
# twoPoses
self.poses = Pose3Vector()
self.poses.append(pose1)
self.poses.append(pose2)
self.poses = [pose1, pose2]
# landmark ~5 meters in front of camera
self.landmark = Point3(5, 0.5, 1.2)
@ -49,7 +47,7 @@ class TestTriangulationExample(GtsamTestCase):
cal_params: Iterable[Iterable[Union[int, float]]],
camera_set: Optional[Union[CameraSetCal3Bundler,
CameraSetCal3_S2]] = None,
) -> Tuple[Point2Vector, Union[CameraSetCal3Bundler, CameraSetCal3_S2,
) -> Tuple[List[Point2], Union[CameraSetCal3Bundler, CameraSetCal3_S2,
List[Cal3Bundler], List[Cal3_S2]]]:
"""
Generate vector of measurements for given calibration and camera model.
@ -67,7 +65,7 @@ class TestTriangulationExample(GtsamTestCase):
cameras = camera_set()
else:
cameras = []
measurements = Point2Vector()
measurements = []
for k, pose in zip(cal_params, self.poses):
K = calibration(*k)
@ -96,7 +94,7 @@ class TestTriangulationExample(GtsamTestCase):
self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9)
# Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
measurements_noisy = Point2Vector()
measurements_noisy = []
measurements_noisy.append(measurements[0] - np.array([0.1, 0.5]))
measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3]))
@ -163,8 +161,8 @@ class TestTriangulationExample(GtsamTestCase):
z2: Point2 = camera2.project(landmark)
z3: Point2 = camera3.project(landmark)
poses = gtsam.Pose3Vector([pose1, pose2, pose3])
measurements = Point2Vector([z1, z2, z3])
poses = [pose1, pose2, pose3]
measurements = [z1, z2, z3]
# noise free, so should give exactly the landmark
actual = gtsam.triangulatePoint3(poses,
@ -226,15 +224,15 @@ class TestTriangulationExample(GtsamTestCase):
z2 = camera2.project(self.landmark)
cameras = CameraSetCal3_S2()
measurements = Point2Vector()
cameras.append(camera1)
cameras.append(camera2)
measurements = []
measurements.append(z1)
measurements.append(z2)
landmarkDistanceThreshold = 10 # landmark is closer than that
# all default except landmarkDistanceThreshold:
# all default except landmarkDistanceThreshold:
params = TriangulationParameters(1.0, False, landmarkDistanceThreshold)
actual: TriangulationResult = gtsam.triangulateSafe(
cameras, measurements, params)
@ -242,8 +240,8 @@ class TestTriangulationExample(GtsamTestCase):
self.assertTrue(actual.valid())
landmarkDistanceThreshold = 4 # landmark is farther than that
params2 = TriangulationParameters(
1.0, False, landmarkDistanceThreshold)
params2 = TriangulationParameters(1.0, False,
landmarkDistanceThreshold)
actual = gtsam.triangulateSafe(cameras, measurements, params2)
self.assertTrue(actual.farPoint())
@ -258,15 +256,17 @@ class TestTriangulationExample(GtsamTestCase):
measurements.append(z3 + Point2(10, -10))
landmarkDistanceThreshold = 10 # landmark is closer than that
outlierThreshold = 100 # loose, the outlier is going to pass
params3 = TriangulationParameters(1.0, False, landmarkDistanceThreshold,
outlierThreshold = 100 # loose, the outlier is going to pass
params3 = TriangulationParameters(1.0, False,
landmarkDistanceThreshold,
outlierThreshold)
actual = gtsam.triangulateSafe(cameras, measurements, params3)
self.assertTrue(actual.valid())
# now set stricter threshold for outlier rejection
outlierThreshold = 5 # tighter, the outlier is not going to pass
params4 = TriangulationParameters(1.0, False, landmarkDistanceThreshold,
params4 = TriangulationParameters(1.0, False,
landmarkDistanceThreshold,
outlierThreshold)
actual = gtsam.triangulateSafe(cameras, measurements, params4)
self.assertTrue(actual.outlier())
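Aside, not part of the changeset above: the edits in this test file replace the wrapped STL containers (Pose3Vector, Point2Vector) with native Python lists. Below is a minimal sketch of the resulting call pattern, reusing the same fixtures as the test, under the assumption that the updated wrapper accepts plain lists wherever those containers were previously required.
import numpy as np
import gtsam
from gtsam import Cal3_S2, PinholeCameraCal3_S2, Point3, Pose3, Rot3
# Same fixtures as in the test above
UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
pose1 = Pose3(UPRIGHT, Point3(0, 0, 1))
pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))
K = Cal3_S2(1500, 1200, 0, 640, 480)
landmark = Point3(5, 0.5, 1.2)
# Plain Python lists instead of gtsam.Pose3Vector / gtsam.Point2Vector
poses = [pose1, pose2]
cameras = [PinholeCameraCal3_S2(p, K) for p in poses]
measurements = [c.project(landmark) for c in cameras]
point = gtsam.triangulatePoint3(poses, K, measurements,
                                rank_tol=1e-9, optimize=True)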

View File

@ -0,0 +1,897 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Unit tests to ensure backwards compatibility of the Python wrapper.
Author: Varun Agrawal
"""
import unittest
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
from gtsam.gtsfm import Keypoints
from gtsam.symbol_shorthand import X
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam import (BetweenFactorPose2, Cal3_S2, Cal3Bundler, CameraSetCal3_S2,
CameraSetCal3Bundler, IndexPair, LevenbergMarquardtParams,
PinholeCameraCal3_S2, PinholeCameraCal3Bundler, Point2,
Point2Pairs, Point3, Pose2, Pose2Pairs, Pose3, Rot2, Rot3,
SfmTrack2d, ShonanAveraging2, ShonanAveragingParameters2,
Similarity2, Similarity3, TriangulationParameters,
TriangulationResult)
UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
class TestBackwardsCompatibility(GtsamTestCase):
"""Tests for the backwards compatibility for the Python wrapper."""
def setUp(self):
"""Setup test fixtures"""
p1 = [-1.0, 0.0, -1.0]
p2 = [1.0, 0.0, -1.0]
q1 = Rot3(1.0, 0.0, 0.0, 0.0)
q2 = Rot3(1.0, 0.0, 0.0, 0.0)
pose1 = Pose3(q1, p1)
pose2 = Pose3(q2, p2)
camera1 = gtsam.PinholeCameraCal3Fisheye(pose1)
camera2 = gtsam.PinholeCameraCal3Fisheye(pose2)
self.origin = np.array([0.0, 0.0, 0.0])
self.poses = gtsam.Pose3Vector([pose1, pose2])
self.fisheye_cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2])
self.fisheye_measurements = gtsam.Point2Vector(
[k.project(self.origin) for k in self.fisheye_cameras])
fx, fy, s, u0, v0 = 2, 2, 0, 0, 0
k1, k2, p1, p2 = 0, 0, 0, 0
xi = 1
self.stereographic = gtsam.Cal3Unified(fx, fy, s, u0, v0, k1, k2, p1,
p2, xi)
camera1 = gtsam.PinholeCameraCal3Unified(pose1, self.stereographic)
camera2 = gtsam.PinholeCameraCal3Unified(pose2, self.stereographic)
self.unified_cameras = gtsam.CameraSetCal3Unified([camera1, camera2])
self.unified_measurements = gtsam.Point2Vector(
[k.project(self.origin) for k in self.unified_cameras])
## Set up two camera poses
# Looking along X-axis, 1 meter above ground plane (x-y)
pose1 = Pose3(UPRIGHT, Point3(0, 0, 1))
# create second camera 1 meter to the right of first camera
pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))
# twoPoses
self.triangulation_poses = gtsam.Pose3Vector()
self.triangulation_poses.append(pose1)
self.triangulation_poses.append(pose2)
# landmark ~5 meters in front of the camera
self.landmark = Point3(5, 0.5, 1.2)
def test_Cal3Fisheye_triangulation_rectify(self):
"""
Estimate spatial point from image measurements using
rectification from a Cal3Fisheye camera model.
"""
rectified = gtsam.Point2Vector([
k.calibration().calibrate(pt)
for k, pt in zip(self.fisheye_cameras, self.fisheye_measurements)
])
shared_cal = gtsam.Cal3_S2()
triangulated = gtsam.triangulatePoint3(self.poses,
shared_cal,
rectified,
rank_tol=1e-9,
optimize=False)
self.gtsamAssertEquals(triangulated, self.origin)
def test_Cal3Unified_triangulation_rectify(self):
"""
Estimate spatial point from image measurements using
rectification from a Cal3Unified camera model.
"""
rectified = gtsam.Point2Vector([
k.calibration().calibrate(pt)
for k, pt in zip(self.unified_cameras, self.unified_measurements)
])
shared_cal = gtsam.Cal3_S2()
triangulated = gtsam.triangulatePoint3(self.poses,
shared_cal,
rectified,
rank_tol=1e-9,
optimize=False)
self.gtsamAssertEquals(triangulated, self.origin)
def test_track_generation(self) -> None:
"""Ensures that DSF generates three tracks from measurements
in 3 images (H=200,W=400)."""
kps_i0 = Keypoints(np.array([[10.0, 20], [30, 40]]))
kps_i1 = Keypoints(np.array([[50.0, 60], [70, 80], [90, 100]]))
kps_i2 = Keypoints(np.array([[110.0, 120], [130, 140]]))
keypoints_list = gtsam.KeypointsVector()
keypoints_list.append(kps_i0)
keypoints_list.append(kps_i1)
keypoints_list.append(kps_i2)
# For each image pair (i1,i2), we provide a (K,2) matrix
# of corresponding image indices (k1,k2).
matches_dict = gtsam.MatchIndicesMap()
matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]])
matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]])
tracks = gtsam.gtsfm.tracksFromPairwiseMatches(
matches_dict,
keypoints_list,
verbose=False,
)
assert len(tracks) == 3
# Verify track 0.
track0 = tracks[0]
assert track0.numberMeasurements() == 2
np.testing.assert_allclose(track0.measurements[0][1], Point2(10, 20))
np.testing.assert_allclose(track0.measurements[1][1], Point2(50, 60))
assert track0.measurements[0][0] == 0
assert track0.measurements[1][0] == 1
np.testing.assert_allclose(
track0.measurementMatrix(),
[
[10, 20],
[50, 60],
],
)
np.testing.assert_allclose(track0.indexVector(), [0, 1])
# Verify track 1.
track1 = tracks[1]
np.testing.assert_allclose(
track1.measurementMatrix(),
[
[30, 40],
[70, 80],
[130, 140],
],
)
np.testing.assert_allclose(track1.indexVector(), [0, 1, 2])
# Verify track 2.
track2 = tracks[2]
np.testing.assert_allclose(
track2.measurementMatrix(),
[
[90, 100],
[110, 120],
],
)
np.testing.assert_allclose(track2.indexVector(), [1, 2])
def test_sfm_track_2d_constructor(self) -> None:
"""Test construction of 2D SfM track."""
measurements = gtsam.SfmMeasurementVector()
measurements.append((0, Point2(10, 20)))
track = SfmTrack2d(measurements=measurements)
track.measurement(0)
assert track.numberMeasurements() == 1
def test_FixedLagSmootherExample(self):
'''
Simple test that checks for equality between C++ example
file and the Python implementation. See
gtsam_unstable/examples/FixedLagSmootherExample.cpp
'''
# Define a batch fixed lag smoother, which uses
# Levenberg-Marquardt to perform the nonlinear optimization
lag = 2.0
smoother_batch = gtsam.BatchFixedLagSmoother(lag)
# Create containers to store the factors and linearization points
# that will be sent to the smoothers
new_factors = gtsam.NonlinearFactorGraph()
new_values = gtsam.Values()
new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap()
# Create a prior on the first pose, placing it at the origin
prior_mean = Pose2(0, 0, 0)
prior_noise = gtsam.noiseModel.Diagonal.Sigmas(
np.array([0.3, 0.3, 0.1]))
X1 = 0
new_factors.push_back(
gtsam.PriorFactorPose2(X1, prior_mean, prior_noise))
new_values.insert(X1, prior_mean)
new_timestamps.insert((X1, 0.0))
delta_time = 0.25
time = 0.25
i = 0
ground_truth = [
Pose2(0.995821, 0.0231012, 0.0300001),
Pose2(1.49284, 0.0457247, 0.045),
Pose2(1.98981, 0.0758879, 0.06),
Pose2(2.48627, 0.113502, 0.075),
Pose2(2.98211, 0.158558, 0.09),
Pose2(3.47722, 0.211047, 0.105),
Pose2(3.97149, 0.270956, 0.12),
Pose2(4.4648, 0.338272, 0.135),
Pose2(4.95705, 0.41298, 0.15),
Pose2(5.44812, 0.495063, 0.165),
Pose2(5.9379, 0.584503, 0.18),
]
# Iterates from 0.25s to 3.0s, adding 0.25s each loop
# In each iteration, the agent moves at a constant speed
# and its two odometers measure the change. The smoothed
# result is then compared to the ground truth
while time <= 3.0:
previous_key = int(1000 * (time - delta_time))
current_key = int(1000 * time)
# assign current key to the current timestamp
new_timestamps.insert((current_key, time))
# Add a guess for this pose to the new values
# Assume that the robot moves at 2 m/s. Position is time[s] *
# 2[m/s]
current_pose = Pose2(time * 2, 0, 0)
new_values.insert(current_key, current_pose)
# Add odometry factors from two different sources with different
# error stats
odometry_measurement_1 = Pose2(0.61, -0.08, 0.02)
odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas(
np.array([0.1, 0.1, 0.05]))
new_factors.push_back(
gtsam.BetweenFactorPose2(previous_key, current_key,
odometry_measurement_1,
odometry_noise_1))
odometry_measurement_2 = Pose2(0.47, 0.03, 0.01)
odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas(
np.array([0.05, 0.05, 0.05]))
new_factors.push_back(
gtsam.BetweenFactorPose2(previous_key, current_key,
odometry_measurement_2,
odometry_noise_2))
# Update the smoothers with the new factors. In this case,
# one iteration must pass for Levenberg-Marquardt to accurately
# estimate
if time >= 0.50:
smoother_batch.update(new_factors, new_values, new_timestamps)
estimate = smoother_batch.calculateEstimatePose2(current_key)
self.assertTrue(estimate.equals(ground_truth[i], 1e-4))
i += 1
new_timestamps.clear()
new_values.clear()
new_factors.resize(0)
time += delta_time
def test_ordering(self):
"""Test ordering"""
gfg = gtsam.GaussianFactorGraph()
x0 = X(0)
x1 = X(1)
x2 = X(2)
BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1))
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1))
gfg.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE)
gfg.add(x2, np.eye(1), x1, -np.eye(1), 2 * np.ones(1), BETWEEN_NOISE)
gfg.add(x0, np.eye(1), np.zeros(1), PRIOR_NOISE)
keys = (x0, x1, x2)
ordering = gtsam.Ordering()
for key in keys[::-1]:
ordering.push_back(key)
bn = gfg.eliminateSequential(ordering)
self.assertEqual(bn.size(), 3)
keyVector = gtsam.KeyVector()
keyVector.append(keys[2])
ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(
gfg, keyVector)
bn = gfg.eliminateSequential(ordering)
self.assertEqual(bn.size(), 3)
def test_find(self):
"""
Check that optimizing for Karcher mean (which minimizes Between distance)
gets the correct result.
"""
R = Rot3.Expmap(np.array([0.1, 0, 0]))
rotations = gtsam.Rot3Vector([R, R.inverse()])
expected = Rot3()
actual = gtsam.FindKarcherMean(rotations)
self.gtsamAssertEquals(expected, actual)
def test_find_karcher_mean_identity(self):
"""Averaging 3 identity rotations should yield the identity."""
a1Rb1 = Rot3()
a2Rb2 = Rot3()
a3Rb3 = Rot3()
aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3])
aRb_expected = Rot3()
aRb = gtsam.FindKarcherMean(aRb_list)
self.gtsamAssertEquals(aRb, aRb_expected)
def test_factor(self):
"""Check that the InnerConstraint factor leaves the mean unchanged."""
# Make a graph with two variables, one between, and one InnerConstraint
# The optimal result should satisfy the between, while moving the other
# variable to make the mean the same as before.
# Mean of R and R' is identity. Let's make a BetweenFactor asking for R12 =
# R*R*R, i.e. geodesic length is 3 rather than 2.
R = Rot3.Expmap(np.array([0.1, 0, 0]))
MODEL = gtsam.noiseModel.Unit.Create(3)
graph = gtsam.NonlinearFactorGraph()
R12 = R.compose(R.compose(R))
graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL))
keys = gtsam.KeyVector()
keys.append(1)
keys.append(2)
graph.add(gtsam.KarcherMeanFactorRot3(keys))
initial = gtsam.Values()
initial.insert(1, R.inverse())
initial.insert(2, R)
expected = Rot3()
result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
actual = gtsam.FindKarcherMean(
gtsam.Rot3Vector([result.atRot3(1),
result.atRot3(2)]))
self.gtsamAssertEquals(expected, actual)
self.gtsamAssertEquals(R12, result.atRot3(1).between(result.atRot3(2)))
def test_align(self) -> None:
"""Ensure estimation of the Pose2 element to align two 2d point clouds succeeds.
Two point clouds represent horseshoe-shapes of the same size, just rotated and translated:
| X---X
| |
| X---X
------------------
|
|
O | O
| | |
O---O
"""
pts_a = [
Point2(1, -3),
Point2(1, -5),
Point2(-1, -5),
Point2(-1, -3),
]
pts_b = [
Point2(3, 1),
Point2(1, 1),
Point2(1, 3),
Point2(3, 3),
]
ab_pairs = Point2Pairs(list(zip(pts_a, pts_b)))
aTb = Pose2.Align(ab_pairs)
self.assertIsNotNone(aTb)
for pt_a, pt_b in zip(pts_a, pts_b):
pt_a_ = aTb.transformFrom(pt_b)
np.testing.assert_allclose(pt_a, pt_a_)
# Matrix version
A = np.array(pts_a).T
B = np.array(pts_b).T
aTb = Pose2.Align(A, B)
self.assertIsNotNone(aTb)
for pt_a, pt_b in zip(pts_a, pts_b):
pt_a_ = aTb.transformFrom(pt_b)
np.testing.assert_allclose(pt_a, pt_a_)
def test_align_squares(self):
"""Test if Align method can align 2 squares."""
square = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]],
float).T
sTt = Pose3(Rot3.Rodrigues(0, 0, -np.pi), gtsam.Point3(2, 4, 0))
transformed = sTt.transformTo(square)
st_pairs = gtsam.Point3Pairs()
for j in range(4):
st_pairs.append((square[:, j], transformed[:, j]))
# Recover the transformation sTt
estimated_sTt = Pose3.Align(st_pairs)
self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10)
# Matrix version
estimated_sTt = Pose3.Align(square, transformed)
self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10)
def test_constructorBetweenFactorPose2s(self) -> None:
"""Check if ShonanAveraging2 constructor works when not initialized from g2o file.
GT pose graph:
| cam 1 = (0,4)
--o
| .
. .
. .
| |
o-- ... o--
cam 0 cam 2 = (4,0)
(0,0)
"""
num_images = 3
wTi_list = [
Pose2(Rot2.fromDegrees(0), np.array([0, 0])),
Pose2(Rot2.fromDegrees(90), np.array([0, 4])),
Pose2(Rot2.fromDegrees(0), np.array([4, 0])),
]
edges = [(0, 1), (1, 2), (0, 2)]
i2Ri1_dict = {(i1, i2):
wTi_list[i2].inverse().compose(wTi_list[i1]).rotation()
for (i1, i2) in edges}
lm_params = LevenbergMarquardtParams.CeresDefaults()
shonan_params = ShonanAveragingParameters2(lm_params)
shonan_params.setUseHuber(False)
shonan_params.setCertifyOptimality(True)
noise_model = gtsam.noiseModel.Unit.Create(3)
between_factors = gtsam.BetweenFactorPose2s()
for (i1, i2), i2Ri1 in i2Ri1_dict.items():
i2Ti1 = Pose2(i2Ri1, np.zeros(2))
between_factors.append(
BetweenFactorPose2(i2, i1, i2Ti1, noise_model))
obj = ShonanAveraging2(between_factors, shonan_params)
initial = obj.initializeRandomly()
result_values, _ = obj.run(initial, min_p=2, max_p=100)
wRi_list = [result_values.atRot2(i) for i in range(num_images)]
thetas_deg = np.array([wRi.degrees() for wRi in wRi_list])
# map all angles to [0,360)
thetas_deg = thetas_deg % 360
thetas_deg -= thetas_deg[0]
expected_thetas_deg = np.array([0.0, 90.0, 0.0])
np.testing.assert_allclose(thetas_deg, expected_thetas_deg, atol=0.1)
def test_align_poses2_along_straight_line(self) -> None:
"""Test Align of list of Pose2Pair.
Scenario:
3 object poses
same scale (no gauge ambiguity)
world frame has poses rotated about 180 degrees.
world and egovehicle frame translated by 15 meters w.r.t. each other
"""
R180 = Rot2.fromDegrees(180)
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
eTo0 = Pose2(Rot2(), np.array([5, 0]))
eTo1 = Pose2(Rot2(), np.array([10, 0]))
eTo2 = Pose2(Rot2(), np.array([15, 0]))
eToi_list = [eTo0, eTo1, eTo2]
# Create destination poses
# (same three objects, but instead living in the world "w" frame)
wTo0 = Pose2(R180, np.array([-10, 0]))
wTo1 = Pose2(R180, np.array([-5, 0]))
wTo2 = Pose2(R180, np.array([0, 0]))
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity2.Align(we_pairs)
for wToi, eToi in zip(wToi_list, eToi_list):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses2_along_straight_line_gauge(self):
"""Test if Align Pose2Pairs method can account for gauge ambiguity.
Scenario:
3 object poses
with gauge ambiguity (2x scale)
world frame has poses rotated by 90 degrees.
world and egovehicle frame translated by 11 meters w.r.t. each other
"""
R90 = Rot2.fromDegrees(90)
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
eTo0 = Pose2(Rot2(), np.array([1, 0]))
eTo1 = Pose2(Rot2(), np.array([2, 0]))
eTo2 = Pose2(Rot2(), np.array([4, 0]))
eToi_list = [eTo0, eTo1, eTo2]
# Create destination poses
# (same three objects, but instead living in the world/city "w" frame)
wTo0 = Pose2(R90, np.array([0, 12]))
wTo1 = Pose2(R90, np.array([0, 14]))
wTo2 = Pose2(R90, np.array([0, 18]))
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity2.Align(we_pairs)
for wToi, eToi in zip(wToi_list, eToi_list):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses2_scaled_squares(self):
"""Test if Align Pose2Pairs method can account for gauge ambiguity.
Make sure a big and small square can be aligned.
The u's represent a big square (10x10), and the v's represent a small square (4x4).
Scenario:
4 object poses
with gauge ambiguity (2.5x scale)
"""
# 0, 90, 180, and 270 degrees yaw
R0 = Rot2.fromDegrees(0)
R90 = Rot2.fromDegrees(90)
R180 = Rot2.fromDegrees(180)
R270 = Rot2.fromDegrees(270)
aTi0 = Pose2(R0, np.array([2, 3]))
aTi1 = Pose2(R90, np.array([12, 3]))
aTi2 = Pose2(R180, np.array([12, 13]))
aTi3 = Pose2(R270, np.array([2, 13]))
aTi_list = [aTi0, aTi1, aTi2, aTi3]
bTi0 = Pose2(R0, np.array([4, 3]))
bTi1 = Pose2(R90, np.array([8, 3]))
bTi2 = Pose2(R180, np.array([8, 7]))
bTi3 = Pose2(R270, np.array([4, 7]))
bTi_list = [bTi0, bTi1, bTi2, bTi3]
ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list)))
# Recover the transformation aSb (i.e. a_S_b)
aSb = Similarity2.Align(ab_pairs)
for aTi, bTi in zip(aTi_list, bTi_list):
self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi))
def test_align_poses3_along_straight_line(self):
"""Test Align Pose3Pairs method.
Scenario:
3 object poses
same scale (no gauge ambiguity)
world frame has poses rotated about x-axis (90 degree roll)
world and egovehicle frame translated by 15 meters w.r.t. each other
"""
Rx90 = Rot3.Rx(np.deg2rad(90))
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
eTo0 = Pose3(Rot3(), np.array([5, 0, 0]))
eTo1 = Pose3(Rot3(), np.array([10, 0, 0]))
eTo2 = Pose3(Rot3(), np.array([15, 0, 0]))
eToi_list = [eTo0, eTo1, eTo2]
# Create destination poses
# (same three objects, but instead living in the world/city "w" frame)
wTo0 = Pose3(Rx90, np.array([-10, 0, 0]))
wTo1 = Pose3(Rx90, np.array([-5, 0, 0]))
wTo2 = Pose3(Rx90, np.array([0, 0, 0]))
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list)))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity3.Align(we_pairs)
for wToi, eToi in zip(wToi_list, eToi_list):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses3_along_straight_line_gauge(self):
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
Scenario:
3 object poses
with gauge ambiguity (2x scale)
world frame has poses rotated about z-axis (90 degree yaw)
world and egovehicle frame translated by 11 meters w.r.t. each other
"""
Rz90 = Rot3.Rz(np.deg2rad(90))
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
eTo0 = Pose3(Rot3(), np.array([1, 0, 0]))
eTo1 = Pose3(Rot3(), np.array([2, 0, 0]))
eTo2 = Pose3(Rot3(), np.array([4, 0, 0]))
eToi_list = [eTo0, eTo1, eTo2]
# Create destination poses
# (same three objects, but instead living in the world/city "w" frame)
wTo0 = Pose3(Rz90, np.array([0, 12, 0]))
wTo1 = Pose3(Rz90, np.array([0, 14, 0]))
wTo2 = Pose3(Rz90, np.array([0, 18, 0]))
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list)))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity3.Align(we_pairs)
for wToi, eToi in zip(wToi_list, eToi_list):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses3_scaled_squares(self):
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
Make sure a big and small square can be aligned.
The u's represent a big square (10x10), and the v's represent a small square (4x4).
Scenario:
4 object poses
with gauge ambiguity (2.5x scale)
"""
# 0, 90, 180, and 270 degrees yaw
R0 = Rot3.Rz(np.deg2rad(0))
R90 = Rot3.Rz(np.deg2rad(90))
R180 = Rot3.Rz(np.deg2rad(180))
R270 = Rot3.Rz(np.deg2rad(270))
aTi0 = Pose3(R0, np.array([2, 3, 0]))
aTi1 = Pose3(R90, np.array([12, 3, 0]))
aTi2 = Pose3(R180, np.array([12, 13, 0]))
aTi3 = Pose3(R270, np.array([2, 13, 0]))
aTi_list = [aTi0, aTi1, aTi2, aTi3]
bTi0 = Pose3(R0, np.array([4, 3, 0]))
bTi1 = Pose3(R90, np.array([8, 3, 0]))
bTi2 = Pose3(R180, np.array([8, 7, 0]))
bTi3 = Pose3(R270, np.array([4, 7, 0]))
bTi_list = [bTi0, bTi1, bTi2, bTi3]
ab_pairs = gtsam.Pose3Pairs(list(zip(aTi_list, bTi_list)))
# Recover the transformation aSb (i.e. a_S_b)
aSb = Similarity3.Align(ab_pairs)
for aTi, bTi in zip(aTi_list, bTi_list):
self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi))
def generate_measurements(
self,
calibration: Union[Cal3Bundler, Cal3_S2],
camera_model: Union[PinholeCameraCal3Bundler, PinholeCameraCal3_S2],
cal_params: Iterable[Iterable[Union[int, float]]],
camera_set: Optional[Union[CameraSetCal3Bundler,
CameraSetCal3_S2]] = None,
) -> Tuple[List[Point2], Union[CameraSetCal3Bundler, CameraSetCal3_S2,
List[Cal3Bundler], List[Cal3_S2]]]:
"""
Generate vector of measurements for given calibration and camera model.
Args:
calibration: Camera calibration e.g. Cal3_S2
camera_model: Camera model e.g. PinholeCameraCal3_S2
cal_params: Iterable of camera parameters for `calibration` e.g. [K1, K2]
camera_set: Cameraset object (for individual calibrations)
Returns:
list of measurements and list/CameraSet object for cameras
"""
if camera_set is not None:
cameras = camera_set()
else:
cameras = []
measurements = gtsam.Point2Vector()
for k, pose in zip(cal_params, self.triangulation_poses):
K = calibration(*k)
camera = camera_model(pose, K)
cameras.append(camera)
z = camera.project(self.landmark)
measurements.append(z)
return measurements, cameras
def test_TriangulationExample(self) -> None:
"""Tests triangulation with shared Cal3_S2 calibration"""
# Some common constants
sharedCal = (1500, 1200, 0, 640, 480)
measurements, _ = self.generate_measurements(
calibration=Cal3_S2,
camera_model=PinholeCameraCal3_S2,
cal_params=(sharedCal, sharedCal))
triangulated_landmark = gtsam.triangulatePoint3(
self.triangulation_poses,
Cal3_S2(sharedCal),
measurements,
rank_tol=1e-9,
optimize=True)
self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9)
# Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
measurements_noisy = gtsam.Point2Vector()
measurements_noisy.append(measurements[0] - np.array([0.1, 0.5]))
measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3]))
triangulated_landmark = gtsam.triangulatePoint3(
self.triangulation_poses,
Cal3_S2(sharedCal),
measurements_noisy,
rank_tol=1e-9,
optimize=True)
self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-2)
def test_triangulation_robust_three_poses(self) -> None:
"""Ensure triangulation with a robust model works."""
sharedCal = Cal3_S2(1500, 1200, 0, 640, 480)
# landmark ~5 meters in front of the camera
landmark = Point3(5, 0.5, 1.2)
pose1 = Pose3(UPRIGHT, Point3(0, 0, 1))
pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0))
pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -0.1))
camera1 = PinholeCameraCal3_S2(pose1, sharedCal)
camera2 = PinholeCameraCal3_S2(pose2, sharedCal)
camera3 = PinholeCameraCal3_S2(pose3, sharedCal)
z1: Point2 = camera1.project(landmark)
z2: Point2 = camera2.project(landmark)
z3: Point2 = camera3.project(landmark)
poses = gtsam.Pose3Vector([pose1, pose2, pose3])
measurements = gtsam.Point2Vector([z1, z2, z3])
# noise free, so should give exactly the landmark
actual = gtsam.triangulatePoint3(poses,
sharedCal,
measurements,
rank_tol=1e-9,
optimize=False)
self.assertTrue(np.allclose(landmark, actual, atol=1e-2))
# Add outlier
measurements[0] += Point2(100, 120) # very large pixel noise!
# now estimate does not match landmark
actual2 = gtsam.triangulatePoint3(poses,
sharedCal,
measurements,
rank_tol=1e-9,
optimize=False)
# DLT is surprisingly robust, but still off (actual error is around 0.26m)
self.assertTrue(np.linalg.norm(landmark - actual2) >= 0.2)
self.assertTrue(np.linalg.norm(landmark - actual2) <= 0.5)
# Again with nonlinear optimization
actual3 = gtsam.triangulatePoint3(poses,
sharedCal,
measurements,
rank_tol=1e-9,
optimize=True)
# result from nonlinear (but non-robust) optimization is close to DLT and still off
self.assertTrue(np.allclose(actual2, actual3, atol=0.1))
# Again with nonlinear optimization, this time with robust loss
model = gtsam.noiseModel.Robust.Create(
gtsam.noiseModel.mEstimator.Huber.Create(1.345),
gtsam.noiseModel.Unit.Create(2))
actual4 = gtsam.triangulatePoint3(poses,
sharedCal,
measurements,
rank_tol=1e-9,
optimize=True,
model=model)
# using the Huber loss, the error is now quite small. Nice!
self.assertTrue(np.allclose(landmark, actual4, atol=0.05))
def test_outliers_and_far_landmarks(self) -> None:
"""Check safe triangulation function."""
pose1, pose2 = self.poses
K1 = Cal3_S2(1500, 1200, 0, 640, 480)
# create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
camera1 = PinholeCameraCal3_S2(pose1, K1)
# create second camera 1 meter to the right of first camera
K2 = Cal3_S2(1600, 1300, 0, 650, 440)
camera2 = PinholeCameraCal3_S2(pose2, K2)
# 1. Project two landmarks into two cameras and triangulate
z1 = camera1.project(self.landmark)
z2 = camera2.project(self.landmark)
cameras = CameraSetCal3_S2()
cameras.append(camera1)
cameras.append(camera2)
measurements = gtsam.Point2Vector()
measurements.append(z1)
measurements.append(z2)
landmarkDistanceThreshold = 10 # landmark is closer than that
# all default except landmarkDistanceThreshold:
params = TriangulationParameters(1.0, False, landmarkDistanceThreshold)
actual: TriangulationResult = gtsam.triangulateSafe(
cameras, measurements, params)
self.gtsamAssertEquals(actual.get(), self.landmark, 1e-2)
self.assertTrue(actual.valid())
landmarkDistanceThreshold = 4 # landmark is farther than that
params2 = TriangulationParameters(1.0, False,
landmarkDistanceThreshold)
actual = gtsam.triangulateSafe(cameras, measurements, params2)
self.assertTrue(actual.farPoint())
# 3. Add a slightly rotated third camera above with a wrong measurement
# (OUTLIER)
pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1))
K3 = Cal3_S2(700, 500, 0, 640, 480)
camera3 = PinholeCameraCal3_S2(pose3, K3)
z3 = camera3.project(self.landmark)
cameras.append(camera3)
measurements.append(z3 + Point2(10, -10))
landmarkDistanceThreshold = 10 # landmark is closer than that
outlierThreshold = 100 # loose, the outlier is going to pass
params3 = TriangulationParameters(1.0, False,
landmarkDistanceThreshold,
outlierThreshold)
actual = gtsam.triangulateSafe(cameras, measurements, params3)
self.assertTrue(actual.valid())
# now set stricter threshold for outlier rejection
outlierThreshold = 5 # tighter, the outlier is not going to pass
params4 = TriangulationParameters(1.0, False,
landmarkDistanceThreshold,
outlierThreshold)
actual = gtsam.triangulateSafe(cameras, measurements, params4)
self.assertTrue(actual.outlier())
if __name__ == "__main__":
unittest.main()
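Aside, not part of the changeset above: as a compact reference for the safe-triangulation pattern exercised in test_outliers_and_far_landmarks, here is a sketch assuming the same positional parameter order for TriangulationParameters seen in the tests (rank tolerance, enable-EPI flag, landmark distance threshold, outlier threshold).
import numpy as np
import gtsam
from gtsam import (Cal3_S2, CameraSetCal3_S2, PinholeCameraCal3_S2, Point3,
                   Pose3, Rot3, TriangulationParameters)
# Two cameras, 1 meter apart, looking along the X-axis (same setup as the tests)
K = Cal3_S2(1500, 1200, 0, 640, 480)
pose1 = Pose3(Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2), Point3(0, 0, 1))
pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0)))
cameras = CameraSetCal3_S2()
cameras.append(PinholeCameraCal3_S2(pose1, K))
cameras.append(PinholeCameraCal3_S2(pose2, K))
landmark = Point3(5, 0.5, 1.2)
measurements = [c.project(landmark) for c in cameras]
# rank tol, enable EPI, landmark distance threshold, outlier threshold
params = TriangulationParameters(1.0, False, 10, 100)
result = gtsam.triangulateSafe(cameras, measurements, params)
if result.valid():
    point = result.get()   # triangulated Point3
elif result.farPoint() or result.outlier():
    pass                   # reject this track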

View File

@ -8,31 +8,34 @@ See LICENSE for the license information
CustomFactor unit tests.
Author: Fan Jiang
"""
from typing import List
import unittest
from gtsam import Values, Pose2, CustomFactor
from typing import List
import numpy as np
from gtsam.utils.test_case import GtsamTestCase
import gtsam
from gtsam.utils.test_case import GtsamTestCase
from gtsam import CustomFactor, Pose2, Values
class TestCustomFactor(GtsamTestCase):
def test_new(self):
"""Test the creation of a new CustomFactor"""
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
def error_func(this: CustomFactor, v: gtsam.Values,
H: List[np.ndarray]):
"""Minimal error function stub"""
return np.array([1, 0, 0])
return np.array([1, 0, 0]), H
noise_model = gtsam.noiseModel.Unit.Create(3)
cf = CustomFactor(noise_model, gtsam.KeyVector([0]), error_func)
cf = CustomFactor(noise_model, [0], error_func)
def test_new_keylist(self):
"""Test the creation of a new CustomFactor"""
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
def error_func(this: CustomFactor, v: gtsam.Values,
H: List[np.ndarray]):
"""Minimal error function stub"""
return np.array([1, 0, 0])
@ -43,7 +46,8 @@ class TestCustomFactor(GtsamTestCase):
"""Test if calling the factor works (only error)"""
expected_pose = Pose2(1, 1, 0)
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray:
def error_func(this: CustomFactor, v: gtsam.Values,
H: List[np.ndarray]) -> np.ndarray:
"""Minimal error function with no Jacobian"""
key0 = this.keys()[0]
error = -v.atPose2(key0).localCoordinates(expected_pose)
@ -65,7 +69,8 @@ class TestCustomFactor(GtsamTestCase):
expected = Pose2(2, 2, np.pi / 2)
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
def error_func(this: CustomFactor, v: gtsam.Values,
H: List[np.ndarray]):
"""
The custom error function. One can freely use variables captured
from the enclosing scope, or acquire them by indexing `v`.
@ -84,7 +89,7 @@ class TestCustomFactor(GtsamTestCase):
return error
noise_model = gtsam.noiseModel.Unit.Create(3)
cf = CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)
cf = CustomFactor(noise_model, [0, 1], error_func)
v = Values()
v.insert(0, gT1)
v.insert(1, gT2)
@ -104,7 +109,8 @@ class TestCustomFactor(GtsamTestCase):
gT1 = Pose2(1, 2, np.pi / 2)
gT2 = Pose2(-1, 4, np.pi)
def error_func(this: CustomFactor, v: gtsam.Values, _: List[np.ndarray]):
def error_func(this: CustomFactor, v: gtsam.Values,
_: List[np.ndarray]):
"""Minimal error function stub"""
return np.array([1, 0, 0])
@ -125,7 +131,8 @@ class TestCustomFactor(GtsamTestCase):
expected = Pose2(2, 2, np.pi / 2)
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
def error_func(this: CustomFactor, v: gtsam.Values,
H: List[np.ndarray]):
"""
Error function that mimics a BetweenFactor
:param this: reference to the current CustomFactor being evaluated
@ -138,7 +145,8 @@ class TestCustomFactor(GtsamTestCase):
gT1, gT2 = v.atPose2(key0), v.atPose2(key1)
error = expected.localCoordinates(gT1.between(gT2))
self.assertTrue(H is None) # Should be true if we only request the error
self.assertTrue(
H is None) # Should be true if we only request the error
if H is not None:
result = gT1.between(gT2)
@ -147,7 +155,7 @@ class TestCustomFactor(GtsamTestCase):
return error
noise_model = gtsam.noiseModel.Unit.Create(3)
cf = CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)
cf = CustomFactor(noise_model, [0, 1], error_func)
v = Values()
v.insert(0, gT1)
v.insert(1, gT2)
@ -165,7 +173,8 @@ class TestCustomFactor(GtsamTestCase):
expected = Pose2(2, 2, np.pi / 2)
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
def error_func(this: CustomFactor, v: gtsam.Values,
H: List[np.ndarray]):
"""
Error function that mimics a BetweenFactor
:param this: reference to the current CustomFactor being evaluated
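Aside, not part of the changeset above: the CustomFactor edits switch the key container to a plain Python list and rewrap long signatures. A self-contained sketch of the between-style error function used throughout these tests follows; the Jacobian expressions follow the standard GTSAM BetweenFactor convention, and H is filled only when a Jacobian list is actually passed in.
from typing import List
import numpy as np
import gtsam
from gtsam import CustomFactor, Pose2, Values
expected = Pose2(2, 2, np.pi / 2)
def error_func(this: CustomFactor, v: Values,
               H: List[np.ndarray]):
    """Between-style error; fills Jacobians only when requested."""
    key0, key1 = this.keys()[0], this.keys()[1]
    gT1, gT2 = v.atPose2(key0), v.atPose2(key1)
    error = expected.localCoordinates(gT1.between(gT2))
    if H is not None:
        result = gT1.between(gT2)
        H[0] = -result.inverse().AdjointMap()
        H[1] = np.eye(3)
    return error
noise_model = gtsam.noiseModel.Unit.Create(3)
cf = CustomFactor(noise_model, [0, 1], error_func)  # keys as a plain list
v = Values()
v.insert(0, Pose2(1, 2, np.pi / 2))
v.insert(1, Pose2(-1, 4, np.pi))
e = cf.error(v)  # scalar error (0.5 * ||e||^2 under the unit noise model)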

View File

@ -12,7 +12,6 @@ from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import
import gtsam
from gtsam import Marginals, Point2, Point3, Pose2, Pose3, Values
# For translation between a scaling of the uncertainty ellipse and the
# percentage of inliers see discussion in
# [PR 1067](https://github.com/borglab/gtsam/pull/1067)
@ -557,7 +556,7 @@ def plot_incremental_trajectory(fignum: int,
axes = fig.axes[0]
poses = gtsam.utilities.allPose3s(values)
keys = gtsam.KeyVector(poses.keys())
keys = poses.keys()
for key in keys[start:]:
if values.exists(key):

View File

@ -13,7 +13,7 @@ if (NOT GTSAM_USE_BOOST_FEATURES)
endif()
if (NOT GTSAM_ENABLE_BOOST_SERIALIZATION)
list(APPEND excluded_tests "testSerializationSLAM.cpp")
list(APPEND excluded_tests "testSerializationSlam.cpp")
endif()
# Build tests

View File

@ -21,7 +21,6 @@ using namespace gtsam;
int main(int argc, char *argv[]) {
// FIXME: ticPush_ does not exist
{
gttic_(top1);
gttic_(sub1);