Basis functions (#403)
parent dc148edd5e
commit ff7ddf48bd
@@ -22,18 +22,19 @@ if (GTSAM_BUILD_DOCS)
    # GTSAM core subfolders
    set(gtsam_doc_subdirs
        gtsam/base
        gtsam/basis
        gtsam/discrete
        gtsam/geometry
        gtsam/inference
        gtsam/linear
        gtsam/navigation
        gtsam/nonlinear
        gtsam/sam
        gtsam/sfm
        gtsam/slam
        gtsam/smart
        gtsam/symbolic
        gtsam
    )
@@ -5,6 +5,7 @@ project(gtsam LANGUAGES CXX)
# The following variable is the master list of subdirs to add
set (gtsam_subdirs
    base
    basis
    geometry
    inference
    symbolic
@@ -0,0 +1,507 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file Basis.h
 * @brief Compute an interpolating basis
 * @author Varun Agrawal, Jing Dong, Frank Dellaert
 * @date July 4, 2020
 */

#pragma once

#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/basis/ParameterMatrix.h>

#include <iostream>

/**
 * This file supports creating continuous functions `f(x;p)` as a linear
 * combination of `basis functions` such as the Fourier basis on SO(2) or a set
 * of Chebyshev polynomials on [-1,1].
 *
 * In the expression `f(x;p)` the variable `x` is
 * the continuous argument at which the function is evaluated, and `p` are
 * the parameters which are coefficients of the different basis functions,
 * e.g. p = [4; 3; 2] => 4 + 3x + 2x^2 for a polynomial.
 * However, different parameterizations are also possible.

 * The `Basis` class below defines a number of functors that can be used to
 * evaluate `f(x;p)` at a given `x`, and these functors also calculate
 * the Jacobian of `f(x;p)` with respect to the parameters `p`.
 * This is actually the most important calculation, as it will allow GTSAM
 * to optimize over the parameters `p`.

 * This functionality is implemented using the `CRTP` or "Curiously recurring
 * template pattern" C++ idiom, which is a meta-programming technique in which
 * the derived class is passed as a template argument to `Basis<DERIVED>`.
 * The DERIVED class is assumed to satisfy a C++ concept,
 * i.e., we expect it to define the following types and methods:

 - type `Parameters`: the parameters `p` in f(x;p)
 - `CalculateWeights(size_t N, double x, double a=default, double b=default)`
 - `DerivativeWeights(size_t N, double x, double a=default, double b=default)`

 where `Weights` is a 1*N row vector which defines the basis values for the
 polynomial at the specified point `x`.

 E.g. a Fourier series would give the following:
 - `CalculateWeights` -> For N=5, the values for the bases:
     [1, cos(x), sin(x), cos(2x), sin(2x)]
 - `DerivativeWeights` -> For N=5, these are:
     [0, -sin(x), cos(x), -2sin(2x), 2cos(2x)]

 Note that for a pseudo-spectral basis (as in Chebyshev2), the weights are
 instead the values for the Barycentric interpolation formula, since the values
 at the polynomial points (e.g. Chebyshev points) define the bases.
 */
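// Hypothetical sketch (not part of this commit): a monomial basis 1, x, x^2, ...
// written against the concept above, to make the expected DERIVED interface
// concrete. `Weights` is the 1xN row-vector type defined just below; <cmath>
// is assumed for std::pow.
struct MonomialBasis : Basis<MonomialBasis> {
  using Parameters = Eigen::VectorXd;  // p in f(x;p)

  // Row vector [1, x, x^2, ..., x^{N-1}]
  static Weights CalculateWeights(size_t N, double x, double a = -1, double b = 1) {
    Weights w(1, N);
    for (size_t i = 0; i < N; i++) w(i) = std::pow(x, double(i));
    return w;
  }

  // Row vector [0, 1, 2x, ..., (N-1)x^{N-2}]
  static Weights DerivativeWeights(size_t N, double x, double a = -1, double b = 1) {
    Weights w = Weights::Zero(1, N);
    for (size_t i = 1; i < N; i++) w(i) = double(i) * std::pow(x, double(i - 1));
    return w;
  }
};
// With p = (4, 3, 2), MonomialBasis::EvaluationFunctor(3, x)(p) evaluates 4 + 3x + 2x^2.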
namespace gtsam {

using Weights = Eigen::Matrix<double, 1, -1>; /* 1xN vector */

/**
 * @brief Function for computing the Kronecker product of the 1*N Weight vector
 * `w` with the MxM identity matrix `I` efficiently.
 * The main reason for this is so we don't need to use Eigen's Unsupported
 * library.
 *
 * @tparam M Size of the identity matrix.
 * @param w The weights of the polynomial.
 * @return Mx(M*N) Kronecker product [w(0)*I, w(1)*I, ..., w(N-1)*I]
 */
template <size_t M>
Matrix kroneckerProductIdentity(const Weights& w) {
  Matrix result(M, w.cols() * M);
  result.setZero();

  for (int i = 0; i < w.cols(); i++) {
    result.block(0, i * M, M, M).diagonal().array() = w(i);
  }
  return result;
}

/// CRTP Base class for function bases
template <typename DERIVED>
class GTSAM_EXPORT Basis {
 public:
  /**
   * Calculate weights for all x in vector X.
   * Returns M*N matrix where M is the size of the vector X,
   * and N is the number of basis functions.
   */
  static Matrix WeightMatrix(size_t N, const Vector& X) {
    Matrix W(X.size(), N);
    for (int i = 0; i < X.size(); i++)
      W.row(i) = DERIVED::CalculateWeights(N, X(i));
    return W;
  }

  /**
   * @brief Calculate weights for all x in vector X, with interval [a,b].
   *
   * @param N The number of basis functions.
   * @param X The vector for which to compute the weights.
   * @param a The lower bound for the interval range.
   * @param b The upper bound for the interval range.
   * @return Returns M*N matrix where M is the size of the vector X.
   */
  static Matrix WeightMatrix(size_t N, const Vector& X, double a, double b) {
    Matrix W(X.size(), N);
    for (int i = 0; i < X.size(); i++)
      W.row(i) = DERIVED::CalculateWeights(N, X(i), a, b);
    return W;
  }

  /**
   * An instance of an EvaluationFunctor calculates f(x;p) at a given `x`,
   * applied to Parameters `p`.
   * This functor is used to evaluate a parameterized function at a given scalar
   * value x. When given a specific N*1 vector of Parameters, it returns the
   * scalar value of the function at x, possibly with Jacobians with respect to
   * the parameters.
   */
  class EvaluationFunctor {
   protected:
    Weights weights_;

   public:
    /// For serialization
    EvaluationFunctor() {}

    /// Constructor with standard interval [-1,1]
    EvaluationFunctor(size_t N, double x)
        : weights_(DERIVED::CalculateWeights(N, x)) {}

    /// Constructor with custom interval [a,b]
    EvaluationFunctor(size_t N, double x, double a, double b)
        : weights_(DERIVED::CalculateWeights(N, x, a, b)) {}

    /// Regular 1D evaluation
    double apply(const typename DERIVED::Parameters& p,
                 OptionalJacobian<-1, -1> H = boost::none) const {
      if (H) *H = weights_;
      return (weights_ * p)(0);
    }

    /// c++ sugar
    double operator()(const typename DERIVED::Parameters& p,
                      OptionalJacobian<-1, -1> H = boost::none) const {
      return apply(p, H);  // might call apply in derived
    }

    void print(const std::string& s = "") const {
      std::cout << s << (s != "" ? " " : "") << weights_ << std::endl;
    }
  };

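  // Usage sketch (illustrative only, not part of this commit): evaluating
  // f(x;p) and its Jacobian for a concrete basis such as Chebyshev1Basis,
  // which is declared in gtsam/basis/Chebyshev.h in this same change.
  //
  //   Chebyshev1Basis::Parameters p(3);
  //   p << 4, 3, 2;  // f(x) = 4*T0(x) + 3*T1(x) + 2*T2(x)
  //   Chebyshev1Basis::EvaluationFunctor f(3, 0.5);
  //   Matrix H;            // filled with the 1x3 weight row on return
  //   double y = f(p, H);  // y = CalculateWeights(3, 0.5) * p, H = those weights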
  /**
   * VectorEvaluationFunctor at a given x, applied to ParameterMatrix<M>.
   * This functor is used to evaluate a parameterized function at a given scalar
   * value x. When given specific M*N parameters, it returns an M-vector of the
   * M corresponding function values at x, possibly with Jacobians with respect
   * to the parameters.
   */
  template <int M>
  class VectorEvaluationFunctor : protected EvaluationFunctor {
   protected:
    using VectorM = Eigen::Matrix<double, M, 1>;
    using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
    Jacobian H_;

    /**
     * Calculate the `M*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H = [ w(0) 0 w(1) 0 w(2) 0
     *       0 w(0) 0 w(1) 0 w(2) ]
     * i.e., the Kronecker product of weights_ with the MxM identity matrix.
     */
    void calculateJacobian() {
      H_ = kroneckerProductIdentity<M>(this->weights_);
    }

   public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    /// For serialization
    VectorEvaluationFunctor() {}

    /// Default Constructor
    VectorEvaluationFunctor(size_t N, double x) : EvaluationFunctor(N, x) {
      calculateJacobian();
    }

    /// Constructor, with interval [a,b]
    VectorEvaluationFunctor(size_t N, double x, double a, double b)
        : EvaluationFunctor(N, x, a, b) {
      calculateJacobian();
    }

    /// M-dimensional evaluation
    VectorM apply(const ParameterMatrix<M>& P,
                  OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.matrix() * this->weights_.transpose();
    }

    /// c++ sugar
    VectorM operator()(const ParameterMatrix<M>& P,
                       OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  /**
   * Given an M*N matrix of M-vectors at N polynomial points, an instance of
   * VectorComponentFunctor computes the scalar value of a single row component
   * of the interpolated M-vector at a given point x.
   *
   * This component is specified by the row index i, with 0 <= i < M.
   */
  template <int M>
  class VectorComponentFunctor : public EvaluationFunctor {
   protected:
    using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
    size_t rowIndex_;
    Jacobian H_;

    /*
     * Calculate the `1*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H=[w(0) 0 w(1) 0 w(2) 0] for rowIndex==0
     * H=[0 w(0) 0 w(1) 0 w(2)] for rowIndex==1
     * i.e., one row of the Kronecker product of weights_ with the
     * MxM identity matrix. See also VectorEvaluationFunctor.
     */
    void calculateJacobian(size_t N) {
      H_.setZero(1, M * N);
      for (int j = 0; j < EvaluationFunctor::weights_.size(); j++)
        H_(0, rowIndex_ + j * M) = EvaluationFunctor::weights_(j);
    }

   public:
    /// For serialization
    VectorComponentFunctor() {}

    /// Construct with row index
    VectorComponentFunctor(size_t N, size_t i, double x)
        : EvaluationFunctor(N, x), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Construct with row index and interval
    VectorComponentFunctor(size_t N, size_t i, double x, double a, double b)
        : EvaluationFunctor(N, x, a, b), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Calculate component rowIndex_ of the vector value at x, given parameters P
    double apply(const ParameterMatrix<M>& P,
                 OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.row(rowIndex_) * EvaluationFunctor::weights_.transpose();
    }

    /// c++ sugar
    double operator()(const ParameterMatrix<M>& P,
                      OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  /**
   * Manifold EvaluationFunctor at a given x, applied to ParameterMatrix<M>.
   * This functor is used to evaluate a parameterized function at a given scalar
   * value x. When given specific M*N parameters, it returns the manifold value
   * corresponding to the interpolated M-vector at x, possibly with Jacobians
   * with respect to the parameters.
   *
   * The difference with the VectorEvaluationFunctor is that after computing the
   * M*1 vector xi=F(x;P), with x a scalar and P the M*N parameter matrix, we
   * also retract xi back to the T manifold.
   * For example, if T==Rot3, then we first compute a 3-vector xi using x and P,
   * and then map that 3-vector xi back to the Rot3 manifold, yielding a valid
   * 3D rotation.
   */
  template <class T>
  class ManifoldEvaluationFunctor
      : public VectorEvaluationFunctor<traits<T>::dimension> {
    enum { M = traits<T>::dimension };
    using Base = VectorEvaluationFunctor<M>;

   public:
    /// For serialization
    ManifoldEvaluationFunctor() {}

    /// Default Constructor
    ManifoldEvaluationFunctor(size_t N, double x) : Base(N, x) {}

    /// Constructor, with interval [a,b]
    ManifoldEvaluationFunctor(size_t N, double x, double a, double b)
        : Base(N, x, a, b) {}

    /// Manifold evaluation
    T apply(const ParameterMatrix<M>& P,
            OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      // Interpolate the M-dimensional vector to yield a vector in tangent space
      Eigen::Matrix<double, M, 1> xi = Base::operator()(P, H);

      // Now call retract with this M-vector, possibly with derivatives
      Eigen::Matrix<double, M, M> D_result_xi;
      T result = T::ChartAtOrigin::Retract(xi, H ? &D_result_xi : 0);

      // Finally, if derivatives are asked, apply chain rule where H is Mx(M*N)
      // derivative of interpolation and D_result_xi is MxM derivative of
      // retract.
      if (H) *H = D_result_xi * (*H);

      // and return a T
      return result;
    }

    /// c++ sugar
    T operator()(const ParameterMatrix<M>& P,
                 OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);  // might call apply in derived
    }
  };

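  // Usage sketch (illustrative only, not part of this commit): interpolating a
  // rotation curve with the functor above, assuming Chebyshev2 from
  // gtsam/basis/Chebyshev2.h and Rot3 from gtsam/geometry/Rot3.h. The
  // ParameterMatrix<3>(N) constructor (an M x N matrix of tangent-space
  // parameters) is assumed from the ParameterMatrix.h header added in this PR.
  //
  //   ParameterMatrix<3> P(N);  // 3 == traits<Rot3>::dimension
  //   Chebyshev2::ManifoldEvaluationFunctor<Rot3> fx(N, x, a, b);
  //   Matrix H;           // filled with the 3 x (3*N) Jacobian w.r.t. P
  //   Rot3 R = fx(P, H);  // interpolate the tangent vector, then retract to SO(3)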
  /// Base class for functors below that calculate derivative weights
  class DerivativeFunctorBase {
   protected:
    Weights weights_;

   public:
    /// For serialization
    DerivativeFunctorBase() {}

    DerivativeFunctorBase(size_t N, double x)
        : weights_(DERIVED::DerivativeWeights(N, x)) {}

    DerivativeFunctorBase(size_t N, double x, double a, double b)
        : weights_(DERIVED::DerivativeWeights(N, x, a, b)) {}

    void print(const std::string& s = "") const {
      std::cout << s << (s != "" ? " " : "") << weights_ << std::endl;
    }
  };

  /**
   * An instance of a DerivativeFunctor calculates f'(x;p) at a given `x`,
   * applied to Parameters `p`.
   * When given a scalar value x and a specific N*1 vector of Parameters,
   * this functor returns the scalar derivative value of the function at x,
   * possibly with Jacobians with respect to the parameters.
   */
  class DerivativeFunctor : protected DerivativeFunctorBase {
   public:
    /// For serialization
    DerivativeFunctor() {}

    DerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {}

    DerivativeFunctor(size_t N, double x, double a, double b)
        : DerivativeFunctorBase(N, x, a, b) {}

    double apply(const typename DERIVED::Parameters& p,
                 OptionalJacobian</*1xN*/ -1, -1> H = boost::none) const {
      if (H) *H = this->weights_;
      return (this->weights_ * p)(0);
    }

    /// c++ sugar
    double operator()(const typename DERIVED::Parameters& p,
                      OptionalJacobian</*1xN*/ -1, -1> H = boost::none) const {
      return apply(p, H);  // might call apply in derived
    }
  };

  /**
   * VectorDerivativeFunctor at a given x, applied to ParameterMatrix<M>.
   *
   * This functor is used to evaluate the derivatives of a parameterized
   * function at a given scalar value x. When given specific M*N parameters,
   * it returns an M-vector of the M corresponding function derivatives at x,
   * possibly with Jacobians with respect to the parameters.
   */
  template <int M>
  class VectorDerivativeFunctor : protected DerivativeFunctorBase {
   protected:
    using VectorM = Eigen::Matrix<double, M, 1>;
    using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
    Jacobian H_;

    /**
     * Calculate the `M*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H = [ w(0) 0 w(1) 0 w(2) 0
     *       0 w(0) 0 w(1) 0 w(2) ]
     * i.e., the Kronecker product of weights_ with the MxM identity matrix.
     */
    void calculateJacobian() {
      H_ = kroneckerProductIdentity<M>(this->weights_);
    }

   public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    /// For serialization
    VectorDerivativeFunctor() {}

    /// Default Constructor
    VectorDerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {
      calculateJacobian();
    }

    /// Constructor, with interval [a,b]
    VectorDerivativeFunctor(size_t N, double x, double a, double b)
        : DerivativeFunctorBase(N, x, a, b) {
      calculateJacobian();
    }

    VectorM apply(const ParameterMatrix<M>& P,
                  OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.matrix() * this->weights_.transpose();
    }

    /// c++ sugar
    VectorM operator()(
        const ParameterMatrix<M>& P,
        OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  /**
   * Given an M*N matrix of M-vectors at N polynomial points, an instance of
   * ComponentDerivativeFunctor computes the derivative of a single row
   * component of the interpolated M-vector at a given point x.
   *
   * This component is specified by the row index i, with 0 <= i < M.
   */
  template <int M>
  class ComponentDerivativeFunctor : protected DerivativeFunctorBase {
   protected:
    using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
    size_t rowIndex_;
    Jacobian H_;

    /*
     * Calculate the `1*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H=[w(0) 0 w(1) 0 w(2) 0] for rowIndex==0
     * H=[0 w(0) 0 w(1) 0 w(2)] for rowIndex==1
     * i.e., one row of the Kronecker product of weights_ with the
     * MxM identity matrix. See also VectorDerivativeFunctor.
     */
    void calculateJacobian(size_t N) {
      H_.setZero(1, M * N);
      for (int j = 0; j < this->weights_.size(); j++)
        H_(0, rowIndex_ + j * M) = this->weights_(j);
    }

   public:
    /// For serialization
    ComponentDerivativeFunctor() {}

    /// Construct with row index
    ComponentDerivativeFunctor(size_t N, size_t i, double x)
        : DerivativeFunctorBase(N, x), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Construct with row index and interval
    ComponentDerivativeFunctor(size_t N, size_t i, double x, double a, double b)
        : DerivativeFunctorBase(N, x, a, b), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Calculate derivative of component rowIndex_ of F
    double apply(const ParameterMatrix<M>& P,
                 OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.row(rowIndex_) * this->weights_.transpose();
    }

    /// c++ sugar
    double operator()(const ParameterMatrix<M>& P,
                      OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  // Vector version for MATLAB :-(
  static double Derivative(double x, const Vector& p,  //
                           OptionalJacobian</*1xN*/ -1, -1> H = boost::none) {
    return DerivativeFunctor(p.size(), x)(p, H);
  }
};

}  // namespace gtsam
@@ -0,0 +1,424 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file BasisFactors.h
 * @brief Factor definitions for various Basis functors.
 * @author Varun Agrawal
 * @date July 4, 2020
 **/

#pragma once

#include <gtsam/basis/Basis.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>

namespace gtsam {

/**
 * @brief Factor for enforcing that the scalar value of the polynomial BASIS
 * representation at `x` is the same as the measurement `z` when using a
 * pseudo-spectral parameterization.
 *
 * @tparam BASIS The basis class to use e.g. Chebyshev2
 */
template <class BASIS>
class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
 private:
  using Base = FunctorizedFactor<double, Vector>;

 public:
  EvaluationFactor() {}

  /**
   * @brief Construct a new EvaluationFactor object
   *
   * @param key Symbol for value to optimize.
   * @param z The measurement value.
   * @param model Noise model
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the polynomial.
   */
  EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x)
      : Base(key, z, model, typename BASIS::EvaluationFunctor(N, x)) {}

  /**
   * @brief Construct a new EvaluationFactor object
   *
   * @param key Symbol for value to optimize.
   * @param z The measurement value.
   * @param model Noise model
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x, double a, double b)
      : Base(key, z, model, typename BASIS::EvaluationFunctor(N, x, a, b)) {}

  virtual ~EvaluationFactor() {}
};

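// Usage sketch (illustrative only, not part of this commit): constraining one
// Chebyshev2 parameter vector with scalar samples (xs[i], zs[i]). `key`, `xs`
// and `zs` are assumed to be a Key and two std::vector<double> defined by the
// caller.
//
//   NonlinearFactorGraph graph;
//   auto model = noiseModel::Isotropic::Sigma(1, 0.1);
//   size_t N = 7;
//   for (size_t i = 0; i < xs.size(); i++) {
//     graph.emplace_shared<EvaluationFactor<Chebyshev2>>(key, zs[i], model, N,
//                                                        xs[i]);
//   }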
/**
 * Unary factor for enforcing that a BASIS polynomial evaluation of a
 * ParameterMatrix of size (M, N) is equal to a vector-valued measurement at
 * the same point, when using a pseudo-spectral parameterization.
 *
 * This factor tries to enforce the basis function evaluation `f(x;p)` to take
 * on the value `z` at location `x`, providing a gradient on the parameters p.
 * In a probabilistic estimation context, `z` is known as a measurement, and the
 * parameterized basis function can be seen as a
 * measurement prediction function.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param M: Size of the evaluated state vector.
 */
template <class BASIS, int M>
class GTSAM_EXPORT VectorEvaluationFactor
    : public FunctorizedFactor<Vector, ParameterMatrix<M>> {
 private:
  using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;

 public:
  VectorEvaluationFactor() {}

  /**
   * @brief Construct a new VectorEvaluationFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorEvaluationFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x)
      : Base(key, z, model,
             typename BASIS::template VectorEvaluationFunctor<M>(N, x)) {}

  /**
   * @brief Construct a new VectorEvaluationFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorEvaluationFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x, double a, double b)
      : Base(key, z, model,
             typename BASIS::template VectorEvaluationFunctor<M>(N, x, a, b)) {}

  virtual ~VectorEvaluationFactor() {}
};

/**
 * Unary factor for enforcing that a BASIS polynomial evaluation of a
 * ParameterMatrix of size (P, N) is equal to a specified measurement at the
 * same point, when using a pseudo-spectral parameterization.
 *
 * This factor is similar to `VectorEvaluationFactor` with the key difference
 * being that it only enforces the constraint for a single scalar in the vector,
 * indexed by `i`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param P: Size of the fixed-size vector.
 *
 * Example:
 *  VectorComponentFactor<BASIS, P> controlPrior(key, measured, model,
 *                                               N, i, t, a, b);
 *  where N is the degree and i is the component index.
 */
template <class BASIS, size_t P>
class GTSAM_EXPORT VectorComponentFactor
    : public FunctorizedFactor<double, ParameterMatrix<P>> {
 private:
  using Base = FunctorizedFactor<double, ParameterMatrix<P>>;

 public:
  VectorComponentFactor() {}

  /**
   * @brief Construct a new VectorComponentFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The scalar value at a specified index `i` of the full measurement
   * vector.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
                        const size_t N, size_t i, double x)
      : Base(key, z, model,
             typename BASIS::template VectorComponentFunctor<P>(N, i, x)) {}

  /**
   * @brief Construct a new VectorComponentFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The scalar value at a specified index `i` of the full measurement
   * vector.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
                        const size_t N, size_t i, double x, double a, double b)
      : Base(key, z, model,
             typename BASIS::template VectorComponentFunctor<P>(N, i, x, a, b)) {}

  virtual ~VectorComponentFactor() {}
};

/**
 * For a measurement value of type T i.e. `T z = g(x)`, this unary
 * factor enforces that the polynomial basis, when evaluated at `x`, gives us
 * the same `z`, i.e. `T z = g(x) = f(x;p)`.
 *
 * This is done via computations on the tangent space of the
 * manifold of T.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param T: Object type which is synthesized by the provided functor.
 *
 * Example:
 *  ManifoldEvaluationFactor<Chebyshev2, Rot3> rotationFactor(key, measurement,
 *                                                            model, N, x, a, b);
 *
 * where `x` is the value (e.g. timestep) at which the rotation was evaluated.
 */
template <class BASIS, typename T>
class GTSAM_EXPORT ManifoldEvaluationFactor
    : public FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>> {
 private:
  using Base = FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>>;

 public:
  ManifoldEvaluationFactor() {}

  /**
   * @brief Construct a new ManifoldEvaluationFactor object.
   *
   * @param key Key for the state matrix parameterizing the function to estimate
   * via the BASIS.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which the estimated function is evaluated.
   */
  ManifoldEvaluationFactor(Key key, const T &z, const SharedNoiseModel &model,
                           const size_t N, double x)
      : Base(key, z, model,
             typename BASIS::template ManifoldEvaluationFunctor<T>(N, x)) {}

  /**
   * @brief Construct a new ManifoldEvaluationFactor object.
   *
   * @param key Key for the state matrix parameterizing the function to estimate
   * via the BASIS.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which the estimated function is evaluated.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  ManifoldEvaluationFactor(Key key, const T &z, const SharedNoiseModel &model,
                           const size_t N, double x, double a, double b)
      : Base(key, z, model,
             typename BASIS::template ManifoldEvaluationFunctor<T>(N, x, a, b)) {}

  virtual ~ManifoldEvaluationFactor() {}
};

/**
 * A unary factor which enforces that the evaluation of the derivative of a
 * BASIS polynomial at a specified point `x` is equal to the scalar
 * measurement `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 */
template <class BASIS>
class GTSAM_EXPORT DerivativeFactor
    : public FunctorizedFactor<double, typename BASIS::Parameters> {
 private:
  using Base = FunctorizedFactor<double, typename BASIS::Parameters>;

 public:
  DerivativeFactor() {}

  /**
   * @brief Construct a new DerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   */
  DerivativeFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x)
      : Base(key, z, model, typename BASIS::DerivativeFunctor(N, x)) {}

  /**
   * @brief Construct a new DerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  DerivativeFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x, double a, double b)
      : Base(key, z, model, typename BASIS::DerivativeFunctor(N, x, a, b)) {}

  virtual ~DerivativeFactor() {}
};

/**
 * A unary factor which enforces that the evaluation of the derivative of a
 * BASIS polynomial at a specified point `x` is equal to the vector value `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param M: Size of the evaluated state vector derivative.
 */
template <class BASIS, int M>
class GTSAM_EXPORT VectorDerivativeFactor
    : public FunctorizedFactor<Vector, ParameterMatrix<M>> {
 private:
  using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
  using Func = typename BASIS::template VectorDerivativeFunctor<M>;

 public:
  VectorDerivativeFactor() {}

  /**
   * @brief Construct a new VectorDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorDerivativeFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x)
      : Base(key, z, model, Func(N, x)) {}

  /**
   * @brief Construct a new VectorDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorDerivativeFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x, double a, double b)
      : Base(key, z, model, Func(N, x, a, b)) {}

  virtual ~VectorDerivativeFactor() {}
};

/**
 * A unary factor which enforces that the evaluation of the derivative of a
 * BASIS polynomial is equal to the scalar value at a specific index `i` of a
 * vector-valued measurement `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param P: Size of the control component derivative.
 */
template <class BASIS, int P>
class GTSAM_EXPORT ComponentDerivativeFactor
    : public FunctorizedFactor<double, ParameterMatrix<P>> {
 private:
  using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
  using Func = typename BASIS::template ComponentDerivativeFunctor<P>;

 public:
  ComponentDerivativeFactor() {}

  /**
   * @brief Construct a new ComponentDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The scalar measurement value at a specific index `i` of the full
   * measurement vector.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   */
  ComponentDerivativeFactor(Key key, const double &z,
                            const SharedNoiseModel &model, const size_t N,
                            size_t i, double x)
      : Base(key, z, model, Func(N, i, x)) {}

  /**
   * @brief Construct a new ComponentDerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The scalar measurement value at a specific index `i` of the full
   * measurement vector.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  ComponentDerivativeFactor(Key key, const double &z,
                            const SharedNoiseModel &model, const size_t N,
                            size_t i, double x, double a, double b)
      : Base(key, z, model, Func(N, i, x, a, b)) {}

  virtual ~ComponentDerivativeFactor() {}
};

}  // namespace gtsam
@@ -0,0 +1,6 @@
# Install headers
file(GLOB basis_headers "*.h")
install(FILES ${basis_headers} DESTINATION include/gtsam/basis)

# Build tests
add_subdirectory(tests)
@@ -0,0 +1,98 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file Chebyshev.cpp
 * @brief Chebyshev basis decompositions
 * @author Varun Agrawal, Jing Dong, Frank Dellaert
 * @date July 4, 2020
 */

#include <gtsam/basis/Chebyshev.h>

namespace gtsam {

/**
 * @brief Scale x from [a, b] to [t1, t2]
 *
 * ((t2 - t1) * (x - a) / (b - a)) + t1
 *
 * @param x Value to scale to new range.
 * @param a Original lower limit.
 * @param b Original upper limit.
 * @param t1 New lower limit.
 * @param t2 New upper limit.
 * @return double
 */
static double scale(double x, double a, double b, double t1, double t2) {
  return ((t2 - t1) * (x - a) / (b - a)) + t1;
}

Weights Chebyshev1Basis::CalculateWeights(size_t N, double x, double a,
                                          double b) {
  Weights Tx(1, N);

  x = scale(x, a, b, -1, 1);

  Tx(0) = 1;
  if (N == 1) return Tx;  // guard the recurrence below when N < 2
  Tx(1) = x;
  for (size_t i = 2; i < N; i++) {
    // instead of cos(i*acos(x)), this recurrence is much faster
    Tx(i) = 2 * x * Tx(i - 1) - Tx(i - 2);
  }
  return Tx;
}

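// Quick numerical check (illustrative sketch, not part of this commit) of the
// recurrence above against the trigonometric definition T_n(x) = cos(n acos x);
// <cmath> and <cassert> are assumed.
//
//   double x = 0.3;
//   Weights Tx = Chebyshev1Basis::CalculateWeights(5, x);
//   for (size_t n = 0; n < 5; n++) {
//     assert(std::abs(Tx(n) - std::cos(n * std::acos(x))) < 1e-12);
//   }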
Weights Chebyshev1Basis::DerivativeWeights(size_t N, double x, double a,
                                           double b) {
  Weights Ux = Chebyshev2Basis::CalculateWeights(N, x, a, b);
  Weights weights = Weights::Zero(N);
  for (size_t n = 1; n < N; n++) {
    weights(n) = n * Ux(n - 1);
  }
  return weights;
}

Weights Chebyshev2Basis::CalculateWeights(size_t N, double x, double a,
                                          double b) {
  Weights Ux(N);

  x = scale(x, a, b, -1, 1);

  Ux(0) = 1;
  if (N == 1) return Ux;  // guard the recurrence below when N < 2
  Ux(1) = 2 * x;
  for (size_t i = 2; i < N; i++) {
    // instead of cos(i*acos(x)), this recurrence is much faster
    Ux(i) = 2 * x * Ux(i - 1) - Ux(i - 2);
  }
  return Ux;
}

Weights Chebyshev2Basis::DerivativeWeights(size_t N, double x, double a,
                                           double b) {
  Weights Tx = Chebyshev1Basis::CalculateWeights(N + 1, x, a, b);
  Weights Ux = Chebyshev2Basis::CalculateWeights(N, x, a, b);

  Weights weights(N);

  x = scale(x, a, b, -1, 1);
  if (x == -1 || x == 1) {
    throw std::runtime_error(
        "Derivative of Chebyshev2 Basis does not exist at range limits.");
  }

  for (size_t n = 0; n < N; n++) {
    weights(n) = ((n + 1) * Tx(n + 1) - x * Ux(n)) / (x * x - 1);
  }
  return weights;
}

}  // namespace gtsam
@@ -0,0 +1,109 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file Chebyshev.h
 * @brief Chebyshev basis decompositions
 * @author Varun Agrawal, Jing Dong, Frank Dellaert
 * @date July 4, 2020
 */

#pragma once

#include <gtsam/base/Manifold.h>
#include <gtsam/basis/Basis.h>

#include <unsupported/Eigen/KroneckerProduct>

namespace gtsam {

/**
 * Basis of Chebyshev polynomials of the first kind
 * https://en.wikipedia.org/wiki/Chebyshev_polynomials#First_kind
 * These are typically denoted with the symbol T_n, where n is the degree.
 * The parameter N is the number of coefficients, i.e., N = n+1.
 */
struct Chebyshev1Basis : Basis<Chebyshev1Basis> {
  using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;

  Parameters parameters_;

  /**
   * @brief Evaluate Chebyshev Weights on [-1,1] at x up to order N-1 (N values)
   *
   * @param N Number of coefficients (polynomial degree + 1).
   * @param x Point to evaluate polynomial at.
   * @param a Lower limit of polynomial (default=-1).
   * @param b Upper limit of polynomial (default=1).
   */
  static Weights CalculateWeights(size_t N, double x, double a = -1,
                                  double b = 1);

  /**
   * @brief Evaluate Chebyshev derivative at x.
   * The derivative weights are pre-multiplied to the polynomial Parameters.
   *
   * From Wikipedia we have D[T_n(x),x] = n*U_{n-1}(x),
   * i.e. the derivative of a first-kind Chebyshev polynomial is just a
   * second-kind Chebyshev polynomial. So we define a second-kind basis here
   * of order N-1; note that it has one less weight.
   *
   * The Parameters pertain to 1st-kind Chebyshev polynomials up to order N-1.
   * Of course the first one (order 0) is constant, so we omit that weight.
   *
   * @param N Number of coefficients (polynomial degree + 1).
   * @param x Point to evaluate polynomial at.
   * @param a Lower limit of polynomial (default=-1).
   * @param b Upper limit of polynomial (default=1).
   * @return Weights
   */
  static Weights DerivativeWeights(size_t N, double x, double a = -1,
                                   double b = 1);
};  // Chebyshev1Basis

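The derivative weights above encode the first-kind/second-kind relation that the implementation in Chebyshev.cpp applies term by term (weights(n) = n * Ux(n-1)):

$$\frac{d}{dx}\,T_n(x) = n\,U_{n-1}(x), \qquad f'(x) = \sum_{n=1}^{N-1} p_n\, n\, U_{n-1}(x),$$

so the returned row is $[\,0,\; 1\cdot U_0(x),\; 2\cdot U_1(x),\;\dots,\;(N-1)\,U_{N-2}(x)\,]$, ready to be multiplied with the parameter vector $p$.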
/**
 * Basis of Chebyshev polynomials of the second kind.
 * https://en.wikipedia.org/wiki/Chebyshev_polynomials#Second_kind
 * These are typically denoted with the symbol U_n, where n is the degree.
 * The parameter N is the number of coefficients, i.e., N = n+1.
 * In contrast to the templates in Chebyshev2, the classes below specify
 * basis functions, weighted combinations of which are used to approximate
 * functions. In this sense, they are like the sines and cosines of the Fourier
 * basis.
 */
struct Chebyshev2Basis : Basis<Chebyshev2Basis> {
  using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;

  /**
   * Evaluate Chebyshev Weights on [-1,1] at any x up to order N-1 (N values).
   *
   * @param N Number of coefficients (polynomial degree + 1).
   * @param x Point to evaluate polynomial at.
   * @param a Lower limit of polynomial (default=-1).
   * @param b Upper limit of polynomial (default=1).
   */
  static Weights CalculateWeights(size_t N, double x, double a = -1,
                                  double b = 1);

  /**
   * @brief Evaluate Chebyshev derivative at x.
   *
   * @param N Number of coefficients (polynomial degree + 1).
   * @param x Point to evaluate polynomial at.
   * @param a Lower limit of polynomial (default=-1).
   * @param b Upper limit of polynomial (default=1).
   * @return Weights
   */
  static Weights DerivativeWeights(size_t N, double x, double a = -1,
                                   double b = 1);
};  // Chebyshev2Basis

}  // namespace gtsam
@@ -0,0 +1,205 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file Chebyshev2.cpp
 * @brief Chebyshev parameterizations on Chebyshev points of second kind
 * @author Varun Agrawal, Jing Dong, Frank Dellaert
 * @date July 4, 2020
 */

#include <gtsam/basis/Chebyshev2.h>

namespace gtsam {

Weights Chebyshev2::CalculateWeights(size_t N, double x, double a, double b) {
  // Allocate space for weights
  Weights weights(N);

  // We start by getting distances from x to all Chebyshev points
  Weights distances(N);

  for (size_t j = 0; j < N; j++) {
    const double dj =
        x - Point(N, j, a, b);  // only thing that depends on [a,b]

    if (std::abs(dj) < 1e-10) {
      // exceptional case: x coincides with a Chebyshev point
      weights.setZero();
      weights(j) = 1;
      return weights;
    }
    distances(j) = dj;
  }

  // Beginning of interval, j = 0, x(0) = a
  weights(0) = 0.5 / distances(0);

  // All intermediate points j=1:N-2
  double d = weights(0), s = -1;  // changes sign s at every iteration
  for (size_t j = 1; j < N - 1; j++, s = -s) {
    weights(j) = s / distances(j);
    d += weights(j);
  }

  // End of interval, j = N-1, x(N-1) = b
  weights(N - 1) = 0.5 * s / distances(N - 1);
  d += weights(N - 1);

  // normalize
  return weights / d;
}

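The normalized weights returned above are an instance of the (second-form) barycentric interpolation formula at the Chebyshev points $x_j$, with endpoint factors $c_j = \pm\tfrac12$ and interior factors $c_j = \pm 1$:

$$f(x) \;\approx\; \frac{\sum_{j=0}^{N-1} \dfrac{c_j}{x - x_j}\, f_j}{\sum_{j=0}^{N-1} \dfrac{c_j}{x - x_j}} \;=\; \sum_{j=0}^{N-1} w_j\, f_j,$$

so the row $w$ returned by `CalculateWeights` maps the parameter vector of function values $f_j$ directly to the interpolated value $f(x)$ (the loop accumulates the denominator in `d` and divides at the end).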
Weights Chebyshev2::DerivativeWeights(size_t N, double x, double a, double b) {
  // Allocate space for weights
  Weights weightDerivatives(N);

  // toggle variable so we don't need to use `pow` for -1
  double t = -1;

  // We start by getting distances from x to all Chebyshev points
  Weights distances(N);

  for (size_t j = 0; j < N; j++) {
    const double dj =
        x - Point(N, j, a, b);  // only thing that depends on [a,b]
    if (std::abs(dj) < 1e-10) {
      // exceptional case: x coincides with a Chebyshev point
      weightDerivatives.setZero();
      // compute the jth row of the differentiation matrix for this point
      double cj = (j == 0 || j == N - 1) ? 2. : 1.;
      for (size_t k = 0; k < N; k++) {
        if (j == 0 && k == 0) {
          // we reverse the sign since we order the cheb points from -1 to 1
          weightDerivatives(k) = -(cj * (N - 1) * (N - 1) + 1) / 6.0;
        } else if (j == N - 1 && k == N - 1) {
          // we reverse the sign since we order the cheb points from -1 to 1
          weightDerivatives(k) = (cj * (N - 1) * (N - 1) + 1) / 6.0;
        } else if (k == j) {
          double xj = Point(N, j);
          double xj2 = xj * xj;
          weightDerivatives(k) = -0.5 * xj / (1 - xj2);
        } else {
          double xj = Point(N, j);
          double xk = Point(N, k);
          double ck = (k == 0 || k == N - 1) ? 2. : 1.;
          t = ((j + k) % 2) == 0 ? 1 : -1;
          weightDerivatives(k) = (cj / ck) * t / (xj - xk);
        }
      }
      return 2 * weightDerivatives / (b - a);
    }
    distances(j) = dj;
  }

  // This section of code computes the derivative of
  // the Barycentric Interpolation weights formula by applying
  // the chain rule on the original formula.

  // g and k are multiplier terms which represent the derivatives of
  // the numerator and denominator
  double g = 0, k = 0;
  double w = 1;

  for (size_t j = 0; j < N; j++) {
    if (j == 0 || j == N - 1) {
      w = 0.5;
    } else {
      w = 1.0;
    }

    t = (j % 2 == 0) ? 1 : -1;

    double c = t / distances(j);
    g += w * c;
    k += (w * c / distances(j));
  }

  double s = 1;  // changes sign s at every iteration
  double g2 = g * g;

  for (size_t j = 0; j < N; j++, s = -s) {
    // Beginning of interval, j = 0, x0 = -1.0 and end of interval, j = N-1,
    // x0 = 1.0
    if (j == 0 || j == N - 1) {
      w = 0.5;
    } else {
      // All intermediate points j=1:N-2
      w = 1.0;
    }
    weightDerivatives(j) = (w * -s / (g * distances(j) * distances(j))) -
                           (w * -s * k / (g2 * distances(j)));
  }

  return weightDerivatives;
}

Chebyshev2::DiffMatrix Chebyshev2::DifferentiationMatrix(size_t N, double a,
                                                         double b) {
  DiffMatrix D(N, N);
  if (N == 1) {
    D(0, 0) = 1;
    return D;
  }

  // toggle variable so we don't need to use `pow` for -1
  double t = -1;

  for (size_t i = 0; i < N; i++) {
    double xi = Point(N, i);
    double ci = (i == 0 || i == N - 1) ? 2. : 1.;
    for (size_t j = 0; j < N; j++) {
      if (i == 0 && j == 0) {
        // we reverse the sign since we order the cheb points from -1 to 1
        D(i, j) = -(ci * (N - 1) * (N - 1) + 1) / 6.0;
      } else if (i == N - 1 && j == N - 1) {
        // we reverse the sign since we order the cheb points from -1 to 1
        D(i, j) = (ci * (N - 1) * (N - 1) + 1) / 6.0;
      } else if (i == j) {
        double xi2 = xi * xi;
        D(i, j) = -xi / (2 * (1 - xi2));
      } else {
        double xj = Point(N, j);
        double cj = (j == 0 || j == N - 1) ? 2. : 1.;
        t = ((i + j) % 2) == 0 ? 1 : -1;
        D(i, j) = (ci / cj) * t / (xi - xj);
      }
    }
  }
  // scale the matrix to the range
  return D / ((b - a) / 2.0);
}

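// Usage sketch (illustrative only, not part of this commit): differentiate
// f(x) = x^3 on [-1, 1] by applying D to the vector of function values at the
// Chebyshev points; the result approximates 3*x_j^2 at each point.
//
//   size_t N = 8;
//   Vector fvals(N);
//   for (size_t j = 0; j < N; j++) {
//     double xj = Chebyshev2::Point(N, j);
//     fvals(j) = xj * xj * xj;
//   }
//   Matrix D = Chebyshev2::DifferentiationMatrix(N);
//   Vector fprime = D * fvals;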
Weights Chebyshev2::IntegrationWeights(size_t N, double a, double b) {
  // Allocate space for weights
  Weights weights(N);
  size_t K = N - 1,  // number of intervals between N points
      K2 = K * K;
  weights(0) = 0.5 * (b - a) / (K2 + K % 2 - 1);
  weights(N - 1) = weights(0);

  size_t last_k = K / 2 + K % 2 - 1;

  for (size_t i = 1; i <= N - 2; ++i) {
    double theta = i * M_PI / K;
    weights(i) = (K % 2 == 0) ? 1 - cos(K * theta) / (K2 - 1) : 1;

    for (size_t k = 1; k <= last_k; ++k)
      weights(i) -= 2 * cos(2 * k * theta) / (4 * k * k - 1);
    weights(i) *= (b - a) / K;
  }

  return weights;
}
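// Usage sketch (illustrative only, not part of this commit): Clenshaw-Curtis
// quadrature of f(x) = x^2 on [0, 1] with the weights above; the exact value
// of the integral is 1/3.
//
//   size_t N = 16;
//   Weights w = Chebyshev2::IntegrationWeights(N, 0, 1);
//   Vector fvals(N);
//   for (size_t j = 0; j < N; j++) {
//     double xj = Chebyshev2::Point(N, j, 0, 1);
//     fvals(j) = xj * xj;
//   }
//   double integral = (w * fvals)(0);  // approximately 1.0 / 3.0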

}  // namespace gtsam
@@ -0,0 +1,148 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file Chebyshev2.h
 * @brief Pseudo-spectral parameterization for Chebyshev polynomials of the
 * second kind.
 *
 * In a pseudo-spectral case, rather than the parameters acting as
 * weights for the bases polynomials (as in Chebyshev2Basis), here the
 * parameters are the *values* at a specific set of points in the interval, the
 * "Chebyshev points". These values uniquely determine the polynomial that
 * interpolates them at the Chebyshev points.
 *
 * This is different from Chebyshev.h since it leverages ideas from
 * pseudo-spectral optimization, i.e. we don't decompose into basis functions,
 * rather estimate function parameters that enforce function nodes at Chebyshev
 * points.
 *
 * Please refer to Agrawal21icra for more details.
 *
 * @author Varun Agrawal, Jing Dong, Frank Dellaert
 * @date July 4, 2020
 */

#pragma once

#include <gtsam/base/Manifold.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/basis/Basis.h>

#include <boost/function.hpp>

namespace gtsam {

/**
 * Chebyshev Interpolation on Chebyshev points of the second kind
 * Note that N here, the number of points, is one more than N from
 * 'Approximation Theory and Approximation Practice by L. N. Trefethen (pg.42)'.
 */
class GTSAM_EXPORT Chebyshev2 : public Basis<Chebyshev2> {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  using Base = Basis<Chebyshev2>;
  using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
  using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;

  /// Specific Chebyshev point
  static double Point(size_t N, int j) {
    assert(j >= 0 && size_t(j) < N);
    const double dtheta = M_PI / (N > 1 ? (N - 1) : 1);
    // We add -PI so that we get values ordered from -1 to +1
    // sin(- M_PI_2 + dtheta*j); also works
    return cos(-M_PI + dtheta * j);
  }

  /// Specific Chebyshev point, within [a,b] interval
  static double Point(size_t N, int j, double a, double b) {
    assert(j >= 0 && size_t(j) < N);
    const double dtheta = M_PI / (N > 1 ? (N - 1) : 1);
    // We add -PI so that we get values ordered from -1 to +1
    return a + (b - a) * (1. + cos(-M_PI + dtheta * j)) / 2;
  }

  /// All Chebyshev points
  static Vector Points(size_t N) {
    Vector points(N);
    for (size_t j = 0; j < N; j++) points(j) = Point(N, j);
    return points;
  }

  /// All Chebyshev points, within [a,b] interval
  static Vector Points(size_t N, double a, double b) {
    Vector points = Points(N);
    const double T1 = (a + b) / 2, T2 = (b - a) / 2;
    points = T1 + (T2 * points).array();
    return points;
  }

  /**
   * Evaluate Chebyshev Weights on [-1,1] at any x up to order N-1 (N values)
   * These weights implement barycentric interpolation at a specific x.
   * More precisely, f(x) ~ [w0;...;wN] * [f0;...;fN], where the fj are the
   * values of the function f at the Chebyshev points. As such, for a given x we
   * obtain a linear map from parameter vectors f to interpolated values f(x).
   * Optional [a,b] interval can be specified as well.
   */
  static Weights CalculateWeights(size_t N, double x, double a = -1,
                                  double b = 1);

  /**
   * Evaluate derivative of barycentric weights.
   * This is easy and efficient via the DifferentiationMatrix.
   */
  static Weights DerivativeWeights(size_t N, double x, double a = -1,
                                   double b = 1);

  /// compute D = differentiation matrix, Trefethen00book p.53
  /// when given a parameter vector f of function values at the Chebyshev
  /// points, D*f are the values of f'.
  /// https://people.maths.ox.ac.uk/trefethen/8all.pdf Theorem 8.4
  static DiffMatrix DifferentiationMatrix(size_t N, double a = -1,
                                          double b = 1);

  /**
   * Evaluate Clenshaw-Curtis integration weights.
   * Trefethen00book, pg 128, clencurt.m
   * Note that N in clencurt.m is 1 less than our N
   * K = N-1;
      theta = pi*(0:K)'/K;
      w = zeros(1,N); ii = 2:K; v = ones(K-1, 1);
      if mod(K,2) == 0
        w(1) = 1/(K^2-1); w(N) = w(1);
        for k=1:K/2-1, v = v-2*cos(2*k*theta(ii))/(4*k^2-1); end
        v = v - cos(K*theta(ii))/(K^2-1);
      else
        w(1) = 1/K^2; w(N) = w(1);
        for k=1:K/2, v = v-2*cos(2*k*theta(ii))/(4*k^2-1); end
      end
      w(ii) = 2*v/K;
   */
  static Weights IntegrationWeights(size_t N, double a = -1, double b = 1);

  /**
   * Create matrix of values at Chebyshev points given vector-valued function.
   */
  template <size_t M>
  static Matrix matrix(boost::function<Eigen::Matrix<double, M, 1>(double)> f,
                       size_t N, double a = -1, double b = 1) {
    Matrix Xmat(M, N);
    for (size_t j = 0; j < N; j++) {
      Xmat.col(j) = f(Point(N, j, a, b));
    }
    return Xmat;
  }
};  // Chebyshev2
|
||||
} // namespace gtsam
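For orientation, the barycentric weights and the differentiation matrix documented above compose as in the following minimal sketch. It is not part of the commit; the `example` namespace and function name are illustrative, and only the Chebyshev2 API declared in this header is assumed.

#include <gtsam/basis/Chebyshev2.h>
#include <cmath>

namespace example {
void chebyshev2Sketch() {
  using namespace gtsam;
  const size_t N = 8;
  // Sample f(x) = x^3 at the N Chebyshev points on [-1, 1].
  const Vector pts = Chebyshev2::Points(N);
  Vector fvals(N);
  for (size_t j = 0; j < N; j++) fvals(j) = std::pow(pts(j), 3);

  // Barycentric interpolation: f(0.4) is approximated by weights * fvals.
  const Weights w = Chebyshev2::CalculateWeights(N, 0.4);
  const double fx = (w * fvals)(0);  // ~ 0.064, exact up to round-off here

  // Differentiation matrix: D * fvals are the derivative values f'(x_j).
  const Matrix D = Chebyshev2::DifferentiationMatrix(N);
  const Vector dfvals = D * fvals;  // ~ 3 * x_j^2 at each Chebyshev point
  (void)fx;
  (void)dfvals;
}
}  // namespace example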
|
|
@@ -0,0 +1,99 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file FitBasis.h
|
||||
* @date July 4, 2020
|
||||
* @author Varun Agrawal, Frank Dellaert
|
||||
* @brief Fit a Basis using least-squares
|
||||
*/
|
||||
|
||||
/*
|
||||
* Concept needed for LS. Parameters = Coefficients | Values
|
||||
* - Parameters, Jacobian
|
||||
* - PredictFactor(double x)(Parameters p, OptionalJacobian<1,N> H)
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/basis/Basis.h>
|
||||
#include <gtsam/basis/BasisFactors.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/// Our sequence representation is a map of {x: y} values where y = f(x)
|
||||
using Sequence = std::map<double, double>;
|
||||
/// A sample is a key-value pair from a sequence.
|
||||
using Sample = std::pair<double, double>;
|
||||
|
||||
/**
|
||||
* Class that does regression via least squares
|
||||
* Example usage:
|
||||
* size_t N = 3;
|
||||
* auto fit = FitBasis<Chebyshev2>(data_points, noise_model, N);
|
||||
* Vector coefficients = fit.parameters();
|
||||
*
|
||||
* where `data_points` are a map from `x` to `y` values indicating a function
|
||||
* mapping at specific points, `noise_model` is the Gaussian noise model, and
|
||||
* `N` is the degree of the polynomial basis used to fit the function.
|
||||
*/
|
||||
template <class Basis>
|
||||
class FitBasis {
|
||||
public:
|
||||
using Parameters = typename Basis::Parameters;
|
||||
|
||||
private:
|
||||
Parameters parameters_;
|
||||
|
||||
public:
|
||||
/// Create nonlinear FG from Sequence
|
||||
static NonlinearFactorGraph NonlinearGraph(const Sequence& sequence,
|
||||
const SharedNoiseModel& model,
|
||||
size_t N) {
|
||||
NonlinearFactorGraph graph;
|
||||
for (const Sample sample : sequence) {
|
||||
graph.emplace_shared<EvaluationFactor<Basis>>(0, sample.second, model, N,
|
||||
sample.first);
|
||||
}
|
||||
return graph;
|
||||
}
|
||||
|
||||
/// Create linear FG from Sequence
|
||||
static GaussianFactorGraph::shared_ptr LinearGraph(
|
||||
const Sequence& sequence, const SharedNoiseModel& model, size_t N) {
|
||||
NonlinearFactorGraph graph = NonlinearGraph(sequence, model, N);
|
||||
Values values;
|
||||
values.insert<Parameters>(0, Parameters::Zero(N));
|
||||
GaussianFactorGraph::shared_ptr gfg = graph.linearize(values);
|
||||
return gfg;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Construct a new FitBasis object.
|
||||
*
|
||||
* @param sequence map of x->y values for a function, a.k.a. y = f(x).
|
||||
* @param model The noise model to use.
|
||||
* @param N The degree of the polynomial to fit.
|
||||
*/
|
||||
FitBasis(const Sequence& sequence, const SharedNoiseModel& model, size_t N) {
|
||||
GaussianFactorGraph::shared_ptr gfg = LinearGraph(sequence, model, N);
|
||||
VectorValues solution = gfg->optimize();
|
||||
parameters_ = solution.at(0);
|
||||
}
|
||||
|
||||
/// Return the fitted basis parameters (coefficients or values)
|
||||
Parameters parameters() const { return parameters_; }
|
||||
};
|
||||
|
||||
} // namespace gtsam
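A complete fit then looks like the sketch below. It is not part of the commit; the `example` namespace and function name are illustrative, and it mirrors the Chebyshev2 Decomposition unit test added later in this change (unit noise model, N = 3 parameters, which for Chebyshev2 are function values at the Chebyshev points).

#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/linear/NoiseModel.h>

namespace example {
void fitBasisSketch() {
  using namespace gtsam;
  // Noise-free samples of y = f(x) = x at a handful of points in [-1, 1].
  Sequence sequence;
  for (size_t i = 0; i < 16; i++) {
    const double x = static_cast<double>(i) / 16.0 - 0.99;
    sequence[x] = x;
  }

  // Fit N = 3 Chebyshev2 parameters via least squares.
  auto model = noiseModel::Unit::Create(1);
  const size_t N = 3;
  FitBasis<Chebyshev2> fit(sequence, model, N);

  // For f(x) = x the fitted values at the points {-1, 0, 1} are {-1, 0, 1}.
  Vector parameters = fit.parameters();
  (void)parameters;
}
}  // namespace example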
|
|
@@ -0,0 +1,108 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Fourier.h
|
||||
* @brief Fourier decomposition, see e.g.
|
||||
* http://mathworld.wolfram.com/FourierSeries.html
|
||||
* @author Varun Agrawal, Frank Dellaert
|
||||
* @date July 4, 2020
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/basis/Basis.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/// Fourier basis
|
||||
class GTSAM_EXPORT FourierBasis : public Basis<FourierBasis> {
|
||||
public:
|
||||
using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
|
||||
using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;
|
||||
|
||||
/**
|
||||
* @brief Evaluate Real Fourier Weights of size N at point x,
|
||||
* e.g. N=5 yields bases: 1, cos(x), sin(x), cos(2*x), sin(2*x)
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the weights.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights CalculateWeights(size_t N, double x) {
|
||||
Weights b(N);
|
||||
b[0] = 1;
|
||||
for (size_t i = 1, n = 1; i < N; i += 2, n++) {
|
||||
b[i] = cos(n * x);
|
||||
b[i + 1] = sin(n * x);
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Evaluate Real Fourier Weights of size N in interval [a, b],
|
||||
* e.g. N=5 yields bases: 1, cos(x), sin(x), cos(2*x), sin(2*x)
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the weights.
|
||||
* @param a Lower bound of interval.
|
||||
* @param b Upper bound of interval.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights CalculateWeights(size_t N, double x, double a, double b) {
|
||||
// TODO(Varun) How do we enforce an interval for Fourier series?
|
||||
return CalculateWeights(N, x);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute D = differentiation matrix.
|
||||
* Given the coefficients c of a Fourier series, D*c are the coefficients of its derivative.
|
||||
*/
|
||||
static DiffMatrix DifferentiationMatrix(size_t N) {
|
||||
DiffMatrix D = DiffMatrix::Zero(N, N);
|
||||
double k = 1;
|
||||
for (size_t i = 1; i < N; i += 2) {
|
||||
D(i, i + 1) = k; // sin'(k*x) = k*cos(k*x)
|
||||
D(i + 1, i) = -k; // cos'(k*x) = -k*sin(k*x)
|
||||
k += 1;
|
||||
}
|
||||
|
||||
return D;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get weights at a given x that calculate the derivative.
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the derivative weights.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights DerivativeWeights(size_t N, double x) {
|
||||
return CalculateWeights(N, x) * DifferentiationMatrix(N);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get derivative weights at a given x that calculate the derivative,
|
||||
in the interval [a, b].
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the derivative weights.
|
||||
* @param a Lower bound of interval.
|
||||
* @param b Upper bound of interval.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights DerivativeWeights(size_t N, double x, double a, double b) {
|
||||
return CalculateWeights(N, x, a, b) * DifferentiationMatrix(N);
|
||||
}
|
||||
|
||||
}; // FourierBasis
|
||||
|
||||
} // namespace gtsam
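A quick illustration of the two identities documented above (the weights at x are [1, cos(x), sin(x), ...], and D*c gives the coefficients of the derivative series). This sketch is not part of the commit; the `example` namespace and function name are illustrative.

#include <gtsam/basis/Fourier.h>

namespace example {
void fourierSketch() {
  using namespace gtsam;
  const size_t N = 3;
  const double x = 0.2;

  // f(x) = 1 + 0.5*cos(x) - 0.25*sin(x)
  Vector c(N);
  c << 1.0, 0.5, -0.25;

  // Weights at x are [1, cos(x), sin(x)], so w * c evaluates the series.
  const Weights w = FourierBasis::CalculateWeights(N, x);
  const double fx = (w * c)(0);

  // D * c are the coefficients of f', so the same weights evaluate f'(x).
  const Matrix D = FourierBasis::DifferentiationMatrix(N);
  const Vector dc = D * c;           // [0, -0.25, -0.5]
  const double dfx = (w * dc)(0);    // ~ -0.25*cos(x) - 0.5*sin(x)

  // Equivalent one-liner via the derivative weights.
  const double dfx2 = (FourierBasis::DerivativeWeights(N, x) * c)(0);
  (void)fx; (void)dfx; (void)dfx2;
}
}  // namespace example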
|
|
@@ -0,0 +1,215 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file ParameterMatrix.h
|
||||
* @brief Define ParameterMatrix class which is used to store values at
|
||||
* interpolation points.
|
||||
* @author Varun Agrawal, Frank Dellaert
|
||||
* @date September 21, 2020
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/VectorSpace.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* A matrix abstraction of MxN values at the Basis points.
|
||||
* This class serves as a wrapper over an Eigen matrix.
|
||||
* @tparam M: The dimension of the type you wish to evaluate.
|
||||
* @param N: the number of Basis points (e.g. Chebyshev points of the second
|
||||
* kind).
|
||||
*/
|
||||
template <int M>
|
||||
class ParameterMatrix {
|
||||
using MatrixType = Eigen::Matrix<double, M, -1>;
|
||||
|
||||
private:
|
||||
MatrixType matrix_;
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
enum { dimension = Eigen::Dynamic };
|
||||
|
||||
/**
|
||||
* Create ParameterMatrix using the number of basis points.
|
||||
* @param N: The number of basis points (the columns).
|
||||
*/
|
||||
ParameterMatrix(const size_t N) : matrix_(M, N) { matrix_.setZero(); }
|
||||
|
||||
/**
|
||||
* Create ParameterMatrix from an MxN Eigen Matrix.
|
||||
* @param matrix: An Eigen matrix used to initialize the ParameterMatrix.
|
||||
*/
|
||||
ParameterMatrix(const MatrixType& matrix) : matrix_(matrix) {}
|
||||
|
||||
/// Get the number of rows.
|
||||
size_t rows() const { return matrix_.rows(); }
|
||||
|
||||
/// Get the number of columns.
|
||||
size_t cols() const { return matrix_.cols(); }
|
||||
|
||||
/// Get the underlying matrix.
|
||||
MatrixType matrix() const { return matrix_; }
|
||||
|
||||
/// Return the transpose of the underlying matrix.
|
||||
Eigen::Matrix<double, -1, M> transpose() const { return matrix_.transpose(); }
|
||||
|
||||
/**
|
||||
* Get the matrix row specified by `index`.
|
||||
* @param index: The row index to retrieve.
|
||||
*/
|
||||
Eigen::Matrix<double, 1, -1> row(size_t index) const {
|
||||
return matrix_.row(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the matrix row specified by `index`.
|
||||
* @param index: The row index to set.
|
||||
*/
|
||||
auto row(size_t index) -> Eigen::Block<MatrixType, 1, -1, false> {
|
||||
return matrix_.row(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the matrix column specified by `index`.
|
||||
* @param index: The column index to retrieve.
|
||||
*/
|
||||
Eigen::Matrix<double, M, 1> col(size_t index) const {
|
||||
return matrix_.col(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the matrix column specified by `index`.
|
||||
* @param index: The column index to set.
|
||||
*/
|
||||
auto col(size_t index) -> Eigen::Block<MatrixType, M, 1, true> {
|
||||
return matrix_.col(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set all matrix coefficients to zero.
|
||||
*/
|
||||
void setZero() { matrix_.setZero(); }
|
||||
|
||||
/**
|
||||
* Add a ParameterMatrix to another.
|
||||
* @param other: ParameterMatrix to add.
|
||||
*/
|
||||
ParameterMatrix<M> operator+(const ParameterMatrix<M>& other) const {
|
||||
return ParameterMatrix<M>(matrix_ + other.matrix());
|
||||
}
|
||||
|
||||
/**
|
||||
* Add an M*N-sized vector to the ParameterMatrix.
|
||||
* @param other: Vector which is reshaped and added.
|
||||
*/
|
||||
ParameterMatrix<M> operator+(
|
||||
const Eigen::Matrix<double, -1, 1>& other) const {
|
||||
// This form avoids a deep copy by mapping `other` as an MxN matrix.
|
||||
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
|
||||
return ParameterMatrix<M>(matrix_ + other_);
|
||||
}
|
||||
|
||||
/**
|
||||
* Subtract a ParameterMatrix from another.
|
||||
* @param other: ParameterMatrix to subtract.
|
||||
*/
|
||||
ParameterMatrix<M> operator-(const ParameterMatrix<M>& other) const {
|
||||
return ParameterMatrix<M>(matrix_ - other.matrix());
|
||||
}
|
||||
|
||||
/**
|
||||
* Subtract an M*N-sized vector from the ParameterMatrix.
|
||||
* @param other: Vector which is reshaped and subtracted.
|
||||
*/
|
||||
ParameterMatrix<M> operator-(
|
||||
const Eigen::Matrix<double, -1, 1>& other) const {
|
||||
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
|
||||
return ParameterMatrix<M>(matrix_ - other_);
|
||||
}
|
||||
|
||||
/**
|
||||
* Multiply ParameterMatrix with an Eigen matrix.
|
||||
* @param other: Eigen matrix which should be multiplication compatible with
|
||||
* the ParameterMatrix.
|
||||
*/
|
||||
MatrixType operator*(const Eigen::Matrix<double, -1, -1>& other) const {
|
||||
return matrix_ * other;
|
||||
}
|
||||
|
||||
/// @name Vector Space requirements, following LieMatrix
|
||||
/// @{
|
||||
|
||||
/**
|
||||
* Print the ParameterMatrix.
|
||||
* @param s: The string to prepend for contextual info.
|
||||
*/
|
||||
void print(const std::string& s = "") const {
|
||||
std::cout << (s == "" ? s : s + " ") << matrix_ << std::endl;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for equality up to absolute tolerance.
|
||||
* @param other: The ParameterMatrix to check equality with.
|
||||
* @param tol: The absolute tolerance threshold.
|
||||
*/
|
||||
bool equals(const ParameterMatrix<M>& other, double tol = 1e-8) const {
|
||||
return gtsam::equal_with_abs_tol(matrix_, other.matrix(), tol);
|
||||
}
|
||||
|
||||
/// Returns dimensionality of the tangent space
|
||||
inline size_t dim() const { return matrix_.size(); }
|
||||
|
||||
/// Convert to vector form; flattening is done row-wise
|
||||
inline Vector vector() const {
|
||||
using RowMajor = Eigen::Matrix<double, -1, -1, Eigen::RowMajor>;
|
||||
Vector result(matrix_.size());
|
||||
Eigen::Map<RowMajor>(&result(0), rows(), cols()) = matrix_;
|
||||
return result;
|
||||
}
|
||||
|
||||
/** Identity function to satisfy VectorSpace traits.
|
||||
*
|
||||
* NOTE: The size at compile time is unknown so this identity is zero
|
||||
* length and thus not valid.
|
||||
*/
|
||||
inline static ParameterMatrix identity() {
|
||||
// throw std::runtime_error(
|
||||
// "ParameterMatrix::identity(): Don't use this function");
|
||||
return ParameterMatrix(0);
|
||||
}
|
||||
|
||||
/// @}
|
||||
};
|
||||
|
||||
// traits for ParameterMatrix
|
||||
template <int M>
|
||||
struct traits<ParameterMatrix<M>>
|
||||
: public internal::VectorSpace<ParameterMatrix<M>> {};
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Stream operator that takes a ParameterMatrix. Used for printing.
|
||||
template <int M>
|
||||
inline std::ostream& operator<<(std::ostream& os,
|
||||
const ParameterMatrix<M>& parameterMatrix) {
|
||||
os << parameterMatrix.matrix();
|
||||
return os;
|
||||
}
|
||||
|
||||
} // namespace gtsam
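A small sketch of the intended use of this wrapper: values of a 2-dimensional quantity at N = 3 basis points, one column per point, filled row-by-row and flattened row-wise. It is not part of the commit; the `example` namespace and function name are illustrative.

#include <gtsam/basis/ParameterMatrix.h>

namespace example {
void parameterMatrixSketch() {
  using namespace gtsam;
  const size_t N = 3;
  ParameterMatrix<2> X(N);  // 2x3, zero-initialized

  // One row per component, one column per basis point.
  X.row(0) = Vector3(1.0, 2.0, 3.0).transpose();
  X.row(1) = Vector3(4.0, 5.0, 6.0).transpose();

  const Vector2 atPoint1 = X.col(1);    // value at the second basis point: [2, 5]
  const Vector flattened = X.vector();  // row-wise: [1, 2, 3, 4, 5, 6]
  (void)atPoint1;
  (void)flattened;
}
}  // namespace example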
|
|
@@ -0,0 +1,146 @@
|
|||
//*************************************************************************
|
||||
// basis
|
||||
//*************************************************************************
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
// TODO(gerry): add all the Functors to the Basis interfaces, e.g.
|
||||
// `EvaluationFunctor`
|
||||
|
||||
#include <gtsam/basis/Fourier.h>
|
||||
|
||||
class FourierBasis {
|
||||
static Vector CalculateWeights(size_t N, double x);
|
||||
static Matrix WeightMatrix(size_t N, Vector x);
|
||||
|
||||
static Matrix DifferentiationMatrix(size_t N);
|
||||
static Vector DerivativeWeights(size_t N, double x);
|
||||
};
|
||||
|
||||
#include <gtsam/basis/Chebyshev.h>
|
||||
|
||||
class Chebyshev1Basis {
|
||||
static Matrix CalculateWeights(size_t N, double x);
|
||||
static Matrix WeightMatrix(size_t N, Vector X);
|
||||
};
|
||||
|
||||
class Chebyshev2Basis {
|
||||
static Matrix CalculateWeights(size_t N, double x);
|
||||
static Matrix WeightMatrix(size_t N, Vector x);
|
||||
};
|
||||
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
class Chebyshev2 {
|
||||
static double Point(size_t N, int j);
|
||||
static double Point(size_t N, int j, double a, double b);
|
||||
|
||||
static Vector Points(size_t N);
|
||||
static Vector Points(size_t N, double a, double b);
|
||||
|
||||
static Matrix WeightMatrix(size_t N, Vector X);
|
||||
static Matrix WeightMatrix(size_t N, Vector X, double a, double b);
|
||||
|
||||
static Matrix CalculateWeights(size_t N, double x, double a, double b);
|
||||
static Matrix DerivativeWeights(size_t N, double x, double a, double b);
|
||||
static Matrix IntegrationWeights(size_t N, double a, double b);
|
||||
static Matrix DifferentiationMatrix(size_t N, double a, double b);
|
||||
|
||||
// TODO Needs OptionalJacobian
|
||||
// static double Derivative(double x, Vector f);
|
||||
};
|
||||
|
||||
#include <gtsam/basis/ParameterMatrix.h>
|
||||
|
||||
template <M = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}>
|
||||
class ParameterMatrix {
|
||||
ParameterMatrix(const size_t N);
|
||||
ParameterMatrix(const Matrix& matrix);
|
||||
|
||||
Matrix matrix() const;
|
||||
|
||||
void print(const string& s = "") const;
|
||||
};
|
||||
|
||||
#include <gtsam/basis/BasisFactors.h>
|
||||
|
||||
template <BASIS = {gtsam::Chebyshev2, gtsam::Chebyshev1Basis,
|
||||
gtsam::Chebyshev2Basis, gtsam::FourierBasis}>
|
||||
virtual class EvaluationFactor : gtsam::NoiseModelFactor {
|
||||
EvaluationFactor();
|
||||
EvaluationFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x);
|
||||
EvaluationFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x, double a, double b);
|
||||
};
|
||||
|
||||
template <BASIS, M>
|
||||
virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor {
|
||||
VectorEvaluationFactor();
|
||||
VectorEvaluationFactor(gtsam::Key key, const Vector& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x);
|
||||
VectorEvaluationFactor(gtsam::Key key, const Vector& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x, double a, double b);
|
||||
};
|
||||
|
||||
// TODO(Varun) Better way to support arbitrary dimensions?
|
||||
// Especially if users mainly do `pip install gtsam` for the Python wrapper.
|
||||
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 3>
|
||||
VectorEvaluationFactorChebyshev2D3;
|
||||
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 4>
|
||||
VectorEvaluationFactorChebyshev2D4;
|
||||
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 12>
|
||||
VectorEvaluationFactorChebyshev2D12;
|
||||
|
||||
template <BASIS, P>
|
||||
virtual class VectorComponentFactor : gtsam::NoiseModelFactor {
|
||||
VectorComponentFactor();
|
||||
VectorComponentFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
size_t i, double x);
|
||||
VectorComponentFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
size_t i, double x, double a, double b);
|
||||
};
|
||||
|
||||
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 3>
|
||||
VectorComponentFactorChebyshev2D3;
|
||||
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 4>
|
||||
VectorComponentFactorChebyshev2D4;
|
||||
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 12>
|
||||
VectorComponentFactorChebyshev2D12;
|
||||
|
||||
template <BASIS, T>
|
||||
virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor {
|
||||
ManifoldEvaluationFactor();
|
||||
ManifoldEvaluationFactor(gtsam::Key key, const T& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x);
|
||||
ManifoldEvaluationFactor(gtsam::Key key, const T& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x, double a, double b);
|
||||
};
|
||||
|
||||
// TODO(gerry): Add `DerivativeFactor`, `VectorDerivativeFactor`, and
|
||||
// `ComponentDerivativeFactor`
|
||||
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
|
||||
gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
|
||||
class FitBasis {
|
||||
FitBasis(const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
|
||||
static gtsam::NonlinearFactorGraph NonlinearGraph(
|
||||
const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
static gtsam::GaussianFactorGraph::shared_ptr LinearGraph(
|
||||
const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
Parameters parameters() const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
|
@@ -0,0 +1 @@
|
|||
gtsamAddTestsGlob(basis "test*.cpp" "" "gtsam")
|
|
@@ -0,0 +1,236 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testChebyshev.cpp
|
||||
* @date July 4, 2020
|
||||
* @author Varun Agrawal
|
||||
* @brief Unit tests for Chebyshev Basis Decompositions
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/basis/Chebyshev.h>
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
auto model = noiseModel::Unit::Create(1);
|
||||
|
||||
const size_t N = 3;
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Chebyshev1) {
|
||||
using Synth = Chebyshev1Basis::EvaluationFunctor;
|
||||
Vector c(N);
|
||||
double x;
|
||||
c << 12, 3, 1;
|
||||
x = -1.0;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = -0.5;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = 0.3;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Chebyshev2) {
|
||||
using Synth = Chebyshev2Basis::EvaluationFunctor;
|
||||
Vector c(N);
|
||||
double x;
|
||||
c << 12, 3, 1;
|
||||
x = -1.0;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = -0.5;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = 0.3;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Evaluation) {
|
||||
Chebyshev1Basis::EvaluationFunctor fx(N, 0.5);
|
||||
Vector c(N);
|
||||
c << 3, 5, -12;
|
||||
EXPECT_DOUBLES_EQUAL(11.5, fx(c), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
|
||||
#include <gtsam/nonlinear/Marginals.h>
|
||||
TEST(Chebyshev, Expression) {
|
||||
// Create linear factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
Key key(1);
|
||||
|
||||
// Let's pretend we have 6 GPS measurements (we just do x coordinate)
|
||||
// at times
|
||||
const size_t m = 6;
|
||||
Vector t(m);
|
||||
t << -0.7, -0.4, 0.1, 0.3, 0.7, 0.9;
|
||||
Vector x(m);
|
||||
x << -0.7, -0.4, 0.1, 0.3, 0.7, 0.9;
|
||||
|
||||
for (size_t i = 0; i < m; i++) {
|
||||
graph.emplace_shared<EvaluationFactor<Chebyshev1Basis>>(key, x(i), model, N,
|
||||
t(i));
|
||||
}
|
||||
|
||||
// Solve
|
||||
Values initial;
|
||||
initial.insert<Vector>(key, Vector::Zero(N)); // initial does not matter
|
||||
|
||||
// ... and optimize
|
||||
GaussNewtonParams parameters;
|
||||
GaussNewtonOptimizer optimizer(graph, initial, parameters);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
// Check
|
||||
Vector expected(N);
|
||||
expected << 0, 1, 0;
|
||||
Vector actual_c = result.at<Vector>(key);
|
||||
EXPECT(assert_equal(expected, actual_c));
|
||||
|
||||
// Calculate and print covariances
|
||||
Marginals marginals(graph, result);
|
||||
Matrix3 cov = marginals.marginalCovariance(key);
|
||||
EXPECT_DOUBLES_EQUAL(0.626, cov(1, 1), 1e-3);
|
||||
|
||||
// Predict x at time 1.0
|
||||
Chebyshev1Basis::EvaluationFunctor f(N, 1.0);
|
||||
Matrix H;
|
||||
double actual = f(actual_c, H);
|
||||
EXPECT_DOUBLES_EQUAL(1.0, actual, 1e-9);
|
||||
|
||||
// Calculate predictive variance on prediction
|
||||
double actual_variance_on_prediction = (H * cov * H.transpose())(0);
|
||||
EXPECT_DOUBLES_EQUAL(1.1494, actual_variance_on_prediction, 1e-4);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Decomposition) {
|
||||
const size_t M = 16;
|
||||
|
||||
// Create example sequence
|
||||
Sequence sequence;
|
||||
for (size_t i = 0; i < M; i++) {
|
||||
double x = ((double)i / M); // - 0.99;
|
||||
double y = x;
|
||||
sequence[x] = y;
|
||||
}
|
||||
|
||||
// Do Chebyshev Decomposition
|
||||
FitBasis<Chebyshev1Basis> actual(sequence, model, N);
|
||||
|
||||
// Check
|
||||
Vector expected = Vector::Zero(N);
|
||||
expected(1) = 1;
|
||||
EXPECT(assert_equal(expected, (Vector)actual.parameters(), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev1, Derivative) {
|
||||
Vector c(N);
|
||||
c << 12, 3, 2;
|
||||
|
||||
Weights D;
|
||||
|
||||
double x = -1.0;
|
||||
D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(-5, (D * c)(0), 1e-9);
|
||||
|
||||
x = -0.5;
|
||||
D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(-1, (D * c)(0), 1e-9);
|
||||
|
||||
x = 0.3;
|
||||
D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(5.4, (D * c)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
Vector3 f(-6, 1, 0.5);
|
||||
|
||||
double proxy1(double x, size_t N) {
|
||||
return Chebyshev1Basis::EvaluationFunctor(N, x)(Vector(f));
|
||||
}
|
||||
|
||||
TEST(Chebyshev1, Derivative2) {
|
||||
const double x = 0.5;
|
||||
auto D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
|
||||
Matrix numeric_dTdx =
|
||||
numericalDerivative21<double, double, double>(proxy1, x, N);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(2, numeric_dTdx(0, 0), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(2, (D * f)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, Derivative) {
|
||||
Vector c(N);
|
||||
c << 12, 6, 2;
|
||||
|
||||
Weights D;
|
||||
|
||||
double x = -1.0;
|
||||
CHECK_EXCEPTION(Chebyshev2Basis::DerivativeWeights(N, x), std::runtime_error);
|
||||
x = 1.0;
|
||||
CHECK_EXCEPTION(Chebyshev2Basis::DerivativeWeights(N, x), std::runtime_error);
|
||||
|
||||
x = -0.5;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(4, (D * c)(0), 1e-9);
|
||||
|
||||
x = 0.3;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(16.8, (D * c)(0), 1e-9);
|
||||
|
||||
x = 0.75;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(24, (D * c)(0), 1e-9);
|
||||
|
||||
x = 10;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x, 0, 20);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(12, (D * c)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
double proxy2(double x, size_t N) {
|
||||
return Chebyshev2Basis::EvaluationFunctor(N, x)(Vector(f));
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, Derivative2) {
|
||||
const double x = 0.5;
|
||||
auto D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
|
||||
Matrix numeric_dTdx =
|
||||
numericalDerivative21<double, double, double>(proxy2, x, N);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(4, numeric_dTdx(0, 0), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(4, (D * f)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
//******************************************************************************
|
|
@@ -0,0 +1,435 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testChebyshev2.cpp
|
||||
* @date July 4, 2020
|
||||
* @author Varun Agrawal
|
||||
* @brief Unit tests for Chebyshev Basis Decompositions via pseudo-spectral
|
||||
* methods
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
using namespace boost::placeholders;
|
||||
|
||||
noiseModel::Diagonal::shared_ptr model = noiseModel::Unit::Create(1);
|
||||
|
||||
const size_t N = 32;
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, Point) {
|
||||
static const int N = 5;
|
||||
auto points = Chebyshev2::Points(N);
|
||||
Vector expected(N);
|
||||
expected << -1., -sqrt(2.) / 2., 0., sqrt(2.) / 2., 1.;
|
||||
static const double tol = 1e-15; // changing this reveals errors
|
||||
EXPECT_DOUBLES_EQUAL(expected(0), points(0), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(1), points(1), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(2), points(2), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(3), points(3), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(4), points(4), tol);
|
||||
|
||||
// Check symmetry
|
||||
EXPECT_DOUBLES_EQUAL(Chebyshev2::Point(N, 0), -Chebyshev2::Point(N, 4), tol);
|
||||
EXPECT_DOUBLES_EQUAL(Chebyshev2::Point(N, 1), -Chebyshev2::Point(N, 3), tol);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, PointInInterval) {
|
||||
static const int N = 5;
|
||||
auto points = Chebyshev2::Points(N, 0, 20);
|
||||
Vector expected(N);
|
||||
expected << 0., 1. - sqrt(2.) / 2., 1., 1. + sqrt(2.) / 2., 2.;
|
||||
expected *= 10.0;
|
||||
static const double tol = 1e-15; // changing this reveals errors
|
||||
EXPECT_DOUBLES_EQUAL(expected(0), points(0), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(1), points(1), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(2), points(2), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(3), points(3), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(4), points(4), tol);
|
||||
|
||||
// all at once
|
||||
Vector actual = Chebyshev2::Points(N, 0, 20);
|
||||
CHECK(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// InterpolatingPolynomial[{{-1, 4}, {0, 2}, {1, 6}}, 0.5]
|
||||
TEST(Chebyshev2, Interpolate2) {
|
||||
size_t N = 3;
|
||||
Chebyshev2::EvaluationFunctor fx(N, 0.5);
|
||||
Vector f(N);
|
||||
f << 4, 2, 6;
|
||||
EXPECT_DOUBLES_EQUAL(3.25, fx(f), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// InterpolatingPolynomial[{{0, 4}, {1, 2}, {2, 6}}, 1.5]
|
||||
TEST(Chebyshev2, Interpolate2_Interval) {
|
||||
Chebyshev2::EvaluationFunctor fx(3, 1.5, 0, 2);
|
||||
Vector3 f(4, 2, 6);
|
||||
EXPECT_DOUBLES_EQUAL(3.25, fx(f), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// InterpolatingPolynomial[{{-1, 4}, {-Sqrt[2]/2, 2}, {0, 6}, {Sqrt[2]/2,3}, {1,
|
||||
// 3}}, 0.5]
|
||||
TEST(Chebyshev2, Interpolate5) {
|
||||
Chebyshev2::EvaluationFunctor fx(5, 0.5);
|
||||
Vector f(5);
|
||||
f << 4, 2, 6, 3, 3;
|
||||
EXPECT_DOUBLES_EQUAL(4.34283, fx(f), 1e-5);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Interpolating vectors
|
||||
TEST(Chebyshev2, InterpolateVector) {
|
||||
double t = 30, a = 0, b = 100;
|
||||
const size_t N = 3;
|
||||
// Create 2x3 matrix with Vectors at Chebyshev points
|
||||
ParameterMatrix<2> X(N);
|
||||
X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp
|
||||
|
||||
// Check value
|
||||
Vector expected(2);
|
||||
expected << t, 0;
|
||||
Eigen::Matrix<double, /*2x2N*/ -1, -1> actualH(2, 2 * N);
|
||||
|
||||
Chebyshev2::VectorEvaluationFunctor<2> fx(N, t, a, b);
|
||||
EXPECT(assert_equal(expected, fx(X, actualH), 1e-9));
|
||||
|
||||
// Check derivative
|
||||
boost::function<Vector2(ParameterMatrix<2>)> f = boost::bind(
|
||||
&Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, _1, boost::none);
|
||||
Matrix numericalH =
|
||||
numericalDerivative11<Vector2, ParameterMatrix<2>, 2 * N>(f, X);
|
||||
EXPECT(assert_equal(numericalH, actualH, 1e-9));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, Decomposition) {
|
||||
// Create example sequence
|
||||
Sequence sequence;
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
double x = (double)i / 16. - 0.99, y = x;
|
||||
sequence[x] = y;
|
||||
}
|
||||
|
||||
// Do Chebyshev Decomposition
|
||||
FitBasis<Chebyshev2> actual(sequence, model, 3);
|
||||
|
||||
// Check
|
||||
Vector expected(3);
|
||||
expected << -1, 0, 1;
|
||||
EXPECT(assert_equal(expected, actual.parameters(), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, DifferentiationMatrix3) {
|
||||
// Trefethen00book, p.55
|
||||
const size_t N = 3;
|
||||
Matrix expected(N, N);
|
||||
// Differentiation matrix computed from Chebfun
|
||||
expected << 1.5000, -2.0000, 0.5000, //
|
||||
0.5000, -0.0000, -0.5000, //
|
||||
-0.5000, 2.0000, -1.5000;
|
||||
// multiply by -1 since the cheb points have a phase shift wrt Trefethen
|
||||
// This was verified with chebfun
|
||||
expected = -expected;
|
||||
|
||||
Matrix actual = Chebyshev2::DifferentiationMatrix(N);
|
||||
EXPECT(assert_equal(expected, actual, 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, DerivativeMatrix6) {
|
||||
// Trefethen00book, p.55
|
||||
const size_t N = 6;
|
||||
Matrix expected(N, N);
|
||||
expected << 8.5000, -10.4721, 2.8944, -1.5279, 1.1056, -0.5000, //
|
||||
2.6180, -1.1708, -2.0000, 0.8944, -0.6180, 0.2764, //
|
||||
-0.7236, 2.0000, -0.1708, -1.6180, 0.8944, -0.3820, //
|
||||
0.3820, -0.8944, 1.6180, 0.1708, -2.0000, 0.7236, //
|
||||
-0.2764, 0.6180, -0.8944, 2.0000, 1.1708, -2.6180, //
|
||||
0.5000, -1.1056, 1.5279, -2.8944, 10.4721, -8.5000;
|
||||
// multiply by -1 since the cheb points have a phase shift wrt Trefethen
|
||||
// This was verified with chebfun
|
||||
expected = -expected;
|
||||
|
||||
Matrix actual = Chebyshev2::DifferentiationMatrix(N);
|
||||
EXPECT(assert_equal((Matrix)expected, actual, 1e-4));
|
||||
}
|
||||
|
||||
// test function for CalculateWeights and DerivativeWeights
|
||||
double f(double x) {
|
||||
// return 3*(x**3) - 2*(x**2) + 5*x - 11
|
||||
return 3.0 * pow(x, 3) - 2.0 * pow(x, 2) + 5.0 * x - 11;
|
||||
}
|
||||
|
||||
// its derivative
|
||||
double fprime(double x) {
|
||||
// return 9*(x**2) - 4*(x) + 5
|
||||
return 9.0 * pow(x, 2) - 4.0 * x + 5.0;
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, CalculateWeights) {
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i));
|
||||
}
|
||||
double x1 = 0.7, x2 = -0.376;
|
||||
Weights weights1 = Chebyshev2::CalculateWeights(N, x1);
|
||||
Weights weights2 = Chebyshev2::CalculateWeights(N, x2);
|
||||
EXPECT_DOUBLES_EQUAL(f(x1), weights1 * fvals, 1e-8);
|
||||
EXPECT_DOUBLES_EQUAL(f(x2), weights2 * fvals, 1e-8);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, CalculateWeights2) {
|
||||
double a = 0, b = 10, x1 = 7, x2 = 4.12;
|
||||
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i, a, b));
|
||||
}
|
||||
|
||||
Weights weights1 = Chebyshev2::CalculateWeights(N, x1, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(f(x1), weights1 * fvals, 1e-8);
|
||||
|
||||
Weights weights2 = Chebyshev2::CalculateWeights(N, x2, a, b);
|
||||
double expected2 = f(x2); // 185.454784
|
||||
double actual2 = weights2 * fvals;
|
||||
EXPECT_DOUBLES_EQUAL(expected2, actual2, 1e-8);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, DerivativeWeights) {
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i));
|
||||
}
|
||||
double x1 = 0.7, x2 = -0.376, x3 = 0.0;
|
||||
Weights dWeights1 = Chebyshev2::DerivativeWeights(N, x1);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x1), dWeights1 * fvals, 1e-9);
|
||||
|
||||
Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-9);
|
||||
|
||||
Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-9);
|
||||
|
||||
// test that the derivative at a Chebyshev point is correct
|
||||
double x4 = Chebyshev2::Point(N, 3);
|
||||
Weights dWeights4 = Chebyshev2::DerivativeWeights(N, x4);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x4), dWeights4 * fvals, 1e-9);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, DerivativeWeights2) {
|
||||
double x1 = 5, x2 = 4.12, a = 0, b = 10;
|
||||
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i, a, b));
|
||||
}
|
||||
|
||||
Weights dWeights1 = Chebyshev2::DerivativeWeights(N, x1, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x1), dWeights1 * fvals, 1e-8);
|
||||
|
||||
Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-8);
|
||||
|
||||
// test that the derivative at a Chebyshev point is correct
|
||||
double x3 = Chebyshev2::Point(N, 3, a, b);
|
||||
Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check two different ways to calculate the derivative weights
|
||||
TEST(Chebyshev2, DerivativeWeightsDifferentiationMatrix) {
|
||||
const size_t N6 = 6;
|
||||
double x1 = 0.311;
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(N6);
|
||||
Weights expected = Chebyshev2::CalculateWeights(N6, x1) * D6;
|
||||
Weights actual = Chebyshev2::DerivativeWeights(N6, x1);
|
||||
EXPECT(assert_equal(expected, actual, 1e-12));
|
||||
|
||||
double a = -3, b = 8, x2 = 5.05;
|
||||
Matrix D6_2 = Chebyshev2::DifferentiationMatrix(N6, a, b);
|
||||
Weights expected1 = Chebyshev2::CalculateWeights(N6, x2, a, b) * D6_2;
|
||||
Weights actual1 = Chebyshev2::DerivativeWeights(N6, x2, a, b);
|
||||
EXPECT(assert_equal(expected1, actual1, 1e-12));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check two different ways to calculate the derivative weights
|
||||
TEST(Chebyshev2, DerivativeWeights6) {
|
||||
const size_t N6 = 6;
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(N6);
|
||||
Chebyshev2::Parameters x = Chebyshev2::Points(N6); // ramp with slope 1
|
||||
EXPECT(assert_equal(Vector::Ones(N6), Vector(D6 * x)));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check two different ways to calculate the derivative weights
|
||||
TEST(Chebyshev2, DerivativeWeights7) {
|
||||
const size_t N7 = 7;
|
||||
Matrix D7 = Chebyshev2::DifferentiationMatrix(N7);
|
||||
Chebyshev2::Parameters x = Chebyshev2::Points(N7); // ramp with slope 1
|
||||
EXPECT(assert_equal(Vector::Ones(N7), Vector(D7 * x)));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check derivative in two different ways: numerical and using D on f
|
||||
Vector6 f3_at_6points = (Vector6() << 4, 2, 6, 2, 4, 3).finished();
|
||||
double proxy3(double x) {
|
||||
return Chebyshev2::EvaluationFunctor(6, x)(f3_at_6points);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, Derivative6) {
|
||||
// Check Derivative evaluation at point x=0.2
|
||||
|
||||
// calculate expected values by numerical derivative of synthesis
|
||||
const double x = 0.2;
|
||||
Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy3, x);
|
||||
|
||||
// Calculate derivatives at Chebyshev points using D6, interpolate
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(6);
|
||||
Vector derivative_at_points = D6 * f3_at_6points;
|
||||
Chebyshev2::EvaluationFunctor fx(6, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivative_at_points), 1e-8);
|
||||
|
||||
// Do directly
|
||||
Chebyshev2::DerivativeFunctor dfdx(6, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(f3_at_6points), 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Assert that derivative also works in non-standard interval [0,3]
|
||||
double proxy4(double x) {
|
||||
return Chebyshev2::EvaluationFunctor(6, x, 0, 3)(f3_at_6points);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, Derivative6_03) {
|
||||
// Check Derivative evaluation at point x=0.2, in interval [0,3]
|
||||
|
||||
// calculate expected values by numerical derivative of synthesis
|
||||
const double x = 0.2;
|
||||
Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy4, x);
|
||||
|
||||
// Calculate derivatives at Chebyshev points using D6, interpolate
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(6, 0, 3);
|
||||
Vector derivative_at_points = D6 * f3_at_6points;
|
||||
Chebyshev2::EvaluationFunctor fx(6, x, 0, 3);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivative_at_points), 1e-8);
|
||||
|
||||
// Do directly
|
||||
Chebyshev2::DerivativeFunctor dfdx(6, x, 0, 3);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(f3_at_6points), 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Test VectorDerivativeFunctor
|
||||
TEST(Chebyshev2, VectorDerivativeFunctor) {
|
||||
const size_t N = 3, M = 2;
|
||||
const double x = 0.2;
|
||||
using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
|
||||
VecD fx(N, x, 0, 3);
|
||||
ParameterMatrix<M> X(N);
|
||||
Matrix actualH(M, M * N);
|
||||
EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8));
|
||||
|
||||
// Test Jacobian
|
||||
Matrix expectedH = numericalDerivative11<Vector2, ParameterMatrix<M>, M * N>(
|
||||
boost::bind(&VecD::operator(), fx, _1, boost::none), X);
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-7));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Test VectorDerivativeFunctor with polynomial function
|
||||
TEST(Chebyshev2, VectorDerivativeFunctor2) {
|
||||
const size_t N = 64, M = 1, T = 15;
|
||||
using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
|
||||
|
||||
const Vector points = Chebyshev2::Points(N, 0, T);
|
||||
|
||||
// Assign the parameter matrix
|
||||
Vector values(N);
|
||||
for (size_t i = 0; i < N; ++i) {
|
||||
values(i) = f(points(i));
|
||||
}
|
||||
ParameterMatrix<M> X(values);
|
||||
|
||||
// Evaluate the derivative at the chebyshev points using
|
||||
// VectorDerivativeFunctor.
|
||||
for (size_t i = 0; i < N; ++i) {
|
||||
VecD d(N, points(i), 0, T);
|
||||
Vector1 Dx = d(X);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(points(i)), Dx(0), 1e-6);
|
||||
}
|
||||
|
||||
// Test Jacobian at the first chebyshev point.
|
||||
Matrix actualH(M, M * N);
|
||||
VecD vecd(N, points(0), 0, T);
|
||||
vecd(X, actualH);
|
||||
Matrix expectedH = numericalDerivative11<Vector1, ParameterMatrix<M>, M * N>(
|
||||
boost::bind(&VecD::operator(), vecd, _1, boost::none), X);
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-6));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Test ComponentDerivativeFunctor
|
||||
TEST(Chebyshev2, ComponentDerivativeFunctor) {
|
||||
const size_t N = 6, M = 2;
|
||||
const double x = 0.2;
|
||||
using CompFunc = Chebyshev2::ComponentDerivativeFunctor<M>;
|
||||
size_t row = 1;
|
||||
CompFunc fx(N, row, x, 0, 3);
|
||||
ParameterMatrix<M> X(N);
|
||||
Matrix actualH(1, M * N);
|
||||
EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8);
|
||||
|
||||
Matrix expectedH = numericalDerivative11<double, ParameterMatrix<M>, M * N>(
|
||||
boost::bind(&CompFunc::operator(), fx, _1, boost::none), X);
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-7));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, IntegralWeights) {
|
||||
const size_t N7 = 7;
|
||||
Vector actual = Chebyshev2::IntegrationWeights(N7);
|
||||
Vector expected = (Vector(N7) << 0.0285714285714286, 0.253968253968254,
|
||||
0.457142857142857, 0.520634920634921, 0.457142857142857,
|
||||
0.253968253968254, 0.0285714285714286)
|
||||
.finished();
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
|
||||
const size_t N8 = 8;
|
||||
Vector actual2 = Chebyshev2::IntegrationWeights(N8);
|
||||
Vector expected2 = (Vector(N8) << 0.0204081632653061, 0.190141007218208,
|
||||
0.352242423718159, 0.437208405798326, 0.437208405798326,
|
||||
0.352242423718159, 0.190141007218208, 0.0204081632653061)
|
||||
.finished();
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
//******************************************************************************
|
|
@@ -0,0 +1,254 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testFourier.cpp
|
||||
* @date July 4, 2020
|
||||
* @author Frank Dellaert, Varun Agrawal
|
||||
* @brief Unit tests for Fourier Basis Decompositions with Expressions
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
#include <gtsam/basis/Fourier.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
auto model = noiseModel::Unit::Create(1);
|
||||
|
||||
// Coefficients for testing, respectively 3 and 7 parameter Fourier basis.
|
||||
// They correspond to best approximation of TestFunction(x)
|
||||
const Vector k3Coefficients = (Vector3() << 1.5661, 1.2717, 1.2717).finished();
|
||||
const Vector7 k7Coefficients =
|
||||
(Vector7() << 1.5661, 1.2717, 1.2717, -0.0000, 0.5887, -0.0943, 0.0943)
|
||||
.finished();
|
||||
|
||||
// The test-function used below
|
||||
static double TestFunction(double x) { return exp(sin(x) + cos(x)); }
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, BasisEvaluationFunctor) {
|
||||
// fx(0) takes coefficients c to calculate the value f(c;x==0)
|
||||
FourierBasis::EvaluationFunctor fx(3, 0);
|
||||
EXPECT_DOUBLES_EQUAL(k3Coefficients[0] + k3Coefficients[1],
|
||||
fx(k3Coefficients), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, BasisEvaluationFunctorDerivative) {
|
||||
// If we give the H argument, we get the derivative of fx(0) w.r.t. the coefficients
|
||||
// Needs to be Matrix so it can be used by OptionalJacobian.
|
||||
Matrix H(1, 3);
|
||||
FourierBasis::EvaluationFunctor fx(3, 0);
|
||||
EXPECT_DOUBLES_EQUAL(k3Coefficients[0] + k3Coefficients[1],
|
||||
fx(k3Coefficients, H), 1e-9);
|
||||
|
||||
Matrix13 expectedH(1, 1, 0);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, Manual) {
|
||||
const size_t N = 3;
|
||||
|
||||
// We will create a linear factor graph
|
||||
GaussianFactorGraph graph;
|
||||
|
||||
// We create an unknown Vector expression for the coefficients
|
||||
Key key(1);
|
||||
|
||||
// We will need values below to test the Jacobians
|
||||
Values values;
|
||||
values.insert<Vector>(key, Vector::Zero(N)); // value does not really matter
|
||||
|
||||
// At 16 different samples points x, check Predict_ expression
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
const double x = i * M_PI / 8;
|
||||
const double desiredValue = TestFunction(x);
|
||||
|
||||
// Manual JacobianFactor
|
||||
Matrix A(1, N);
|
||||
A << 1, cos(x), sin(x);
|
||||
Vector b(1);
|
||||
b << desiredValue;
|
||||
JacobianFactor linearFactor(key, A, b);
|
||||
graph.add(linearFactor);
|
||||
|
||||
// Create factor to predict value at x
|
||||
EvaluationFactor<FourierBasis> predictFactor(key, desiredValue, model, N,
|
||||
x);
|
||||
|
||||
// Check expression Jacobians
|
||||
EXPECT_CORRECT_FACTOR_JACOBIANS(predictFactor, values, 1e-5, 1e-9);
|
||||
|
||||
auto linearizedFactor = predictFactor.linearize(values);
|
||||
auto linearizedJacobianFactor =
|
||||
boost::dynamic_pointer_cast<JacobianFactor>(linearizedFactor);
|
||||
CHECK(linearizedJacobianFactor); // makes sure it's indeed a JacobianFactor
|
||||
EXPECT(assert_equal(linearFactor, *linearizedJacobianFactor, 1e-9));
|
||||
}
|
||||
|
||||
// Solve linear graph
|
||||
VectorValues actual = graph.optimize();
|
||||
EXPECT(assert_equal((Vector)k3Coefficients, actual.at(key), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, EvaluationFactor) {
|
||||
// Check fitting a function with a 7-parameter Fourier basis
|
||||
|
||||
// Create linear factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
Key key(1);
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
double x = i * M_PI / 8, desiredValue = TestFunction(x);
|
||||
graph.emplace_shared<EvaluationFactor<FourierBasis>>(key, desiredValue,
|
||||
model, 7, x);
|
||||
}
|
||||
|
||||
// Solve FourierFactorGraph
|
||||
Values values;
|
||||
values.insert<Vector>(key, Vector::Zero(7));
|
||||
GaussianFactorGraph::shared_ptr lfg = graph.linearize(values);
|
||||
VectorValues actual = lfg->optimize();
|
||||
|
||||
EXPECT(assert_equal((Vector)k7Coefficients, actual.at(key), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, WeightMatrix) {
|
||||
// The WeightMatrix creates an m*n matrix, where m is the number of sample
|
||||
// points, and n is the number of parameters.
|
||||
Matrix expected(2, 3);
|
||||
expected.row(0) << 1, cos(1), sin(1);
|
||||
expected.row(1) << 1, cos(2), sin(2);
|
||||
Vector2 X(1, 2);
|
||||
Matrix actual = FourierBasis::WeightMatrix(3, X);
|
||||
EXPECT(assert_equal(expected, actual, 1e-9));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, Decomposition) {
|
||||
// Create example sequence
|
||||
Sequence sequence;
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
double x = i * M_PI / 8, y = TestFunction(x);
|
||||
sequence[x] = y;
|
||||
}
|
||||
|
||||
// Do Fourier Decomposition
|
||||
FitBasis<FourierBasis> actual(sequence, model, 3);
|
||||
|
||||
// Check
|
||||
EXPECT(assert_equal((Vector)k3Coefficients, actual.parameters(), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check derivative in two different ways: numerical and using D on f
|
||||
double proxy(double x) {
|
||||
return FourierBasis::EvaluationFunctor(7, x)(k7Coefficients);
|
||||
}
|
||||
|
||||
TEST(Basis, Derivative7) {
|
||||
// Check Derivative evaluation at point x=0.2
|
||||
|
||||
// Calculate expected values by numerical derivative of proxy.
|
||||
const double x = 0.2;
|
||||
Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy, x);
|
||||
|
||||
// Calculate derivatives at Chebyshev points using D7, interpolate
|
||||
Matrix D7 = FourierBasis::DifferentiationMatrix(7);
|
||||
Vector derivativeCoefficients = D7 * k7Coefficients;
|
||||
FourierBasis::EvaluationFunctor fx(7, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivativeCoefficients), 1e-8);
|
||||
|
||||
// Do directly
|
||||
FourierBasis::DerivativeFunctor dfdx(7, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(k7Coefficients), 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, VecDerivativeFunctor) {
|
||||
using DotShape = typename FourierBasis::VectorDerivativeFunctor<2>;
|
||||
const size_t N = 3;
|
||||
|
||||
// MATLAB example, Dec 27 2019, commit 014eded5
|
||||
double h = 2 * M_PI / 16;
|
||||
Vector2 dotShape(0.5556, -0.8315); // at h/2
|
||||
DotShape dotShapeFunction(N, h / 2);
|
||||
Matrix23 theta_mat = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071)
|
||||
.finished()
|
||||
.transpose();
|
||||
ParameterMatrix<2> theta(theta_mat);
|
||||
EXPECT(assert_equal(Vector(dotShape), dotShapeFunction(theta), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Suppose we want to parameterize a periodic function with function values at
|
||||
// specific times, rather than coefficients. Can we do it? This would be a
|
||||
// generalization of the Fourier transform, and constitute a "pseudo-spectral"
|
||||
// parameterization. One way to do this is to establish hard constraints that
|
||||
// express the relationship between the new parameters and the coefficients.
|
||||
// For example, below I'd like the parameters to be the function values at
|
||||
// X = {0.1,0.2,0.3}, rather than a 3-vector of coefficients.
|
||||
// Because the values f(X) = at these points can be written as f(X) = W(X)*c,
|
||||
// we can simply express the coefficients c as c=inv(W(X))*f, which is a
|
||||
// generalized Fourier transform. That also means we can create factors with the
|
||||
// unknown f-values, as done manually below.
TEST(Basis, PseudoSpectral) {
  // We will create a linear factor graph
  GaussianFactorGraph graph;

  const size_t N = 3;
  const Key key(1);

  // The correct values at X = {0.1,0.2,0.3} are simply W*c
  const Vector X = (Vector3() << 0.1, 0.2, 0.3).finished();
  const Matrix W = FourierBasis::WeightMatrix(N, X);
  const Vector expected = W * k3Coefficients;

  // Check those values are indeed correct values of the Fourier approximation
  using Eval = FourierBasis::EvaluationFunctor;
  EXPECT_DOUBLES_EQUAL(Eval(N, 0.1)(k3Coefficients), expected(0), 1e-9);
  EXPECT_DOUBLES_EQUAL(Eval(N, 0.2)(k3Coefficients), expected(1), 1e-9);
  EXPECT_DOUBLES_EQUAL(Eval(N, 0.3)(k3Coefficients), expected(2), 1e-9);

  // Calculate "inverse Fourier transform" matrix
  const Matrix invW = W.inverse();

  // At 16 different sample points x, add a factor on fExpr
  for (size_t i = 0; i < 16; i++) {
    const double x = i * M_PI / 8;
    const double desiredValue = TestFunction(x);

    // Manual JacobianFactor
    Matrix A(1, 3);
    A << 1, cos(x), sin(x);
    Vector b(1);
    b << desiredValue;
    JacobianFactor linearFactor(key, A * invW, b);
    graph.add(linearFactor);
  }

  // Solve linear graph
  VectorValues actual = graph.optimize();
  EXPECT(assert_equal((Vector)expected, actual.at(key), 1e-4));
}

//******************************************************************************
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
//******************************************************************************
@@ -0,0 +1,145 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testParameterMatrix.cpp
 * @date Sep 22, 2020
 * @author Varun Agrawal, Frank Dellaert
 * @brief Unit tests for ParameterMatrix.h
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <gtsam/inference/Symbol.h>

using namespace std;
using namespace gtsam;

using Parameters = Chebyshev2::Parameters;

const size_t M = 2, N = 5;

//******************************************************************************
TEST(ParameterMatrix, Constructor) {
  ParameterMatrix<2> actual1(3);
  ParameterMatrix<2> expected1(Matrix::Zero(2, 3));
  EXPECT(assert_equal(expected1, actual1));

  ParameterMatrix<2> actual2(Matrix::Ones(2, 3));
  ParameterMatrix<2> expected2(Matrix::Ones(2, 3));
  EXPECT(assert_equal(expected2, actual2));
  EXPECT(assert_equal(expected2.matrix(), actual2.matrix()));
}

//******************************************************************************
TEST(ParameterMatrix, Dimensions) {
  ParameterMatrix<M> params(N);
  EXPECT_LONGS_EQUAL(params.rows(), M);
  EXPECT_LONGS_EQUAL(params.cols(), N);
  EXPECT_LONGS_EQUAL(params.dim(), M * N);
}
//******************************************************************************
TEST(ParameterMatrix, Getters) {
  ParameterMatrix<M> params(N);

  Matrix expectedMatrix = Matrix::Zero(2, 5);
  EXPECT(assert_equal(expectedMatrix, params.matrix()));

  Matrix expectedMatrixTranspose = Matrix::Zero(5, 2);
  EXPECT(assert_equal(expectedMatrixTranspose, params.transpose()));

  ParameterMatrix<M> p2(Matrix::Ones(M, N));
  Vector expectedRowVector = Vector::Ones(N);
  for (size_t r = 0; r < M; ++r) {
    EXPECT(assert_equal(p2.row(r), expectedRowVector));
  }

  ParameterMatrix<M> p3(2 * Matrix::Ones(M, N));
  Vector expectedColVector = 2 * Vector::Ones(M);
  for (size_t c = 0; c < M; ++c) {
    EXPECT(assert_equal(p3.col(c), expectedColVector));
  }
}

//******************************************************************************
TEST(ParameterMatrix, Setters) {
  ParameterMatrix<M> params(Matrix::Zero(M, N));
  Matrix expected = Matrix::Zero(M, N);

  // row
  params.row(0) = Vector::Ones(N);
  expected.row(0) = Vector::Ones(N);
  EXPECT(assert_equal(expected, params.matrix()));

  // col
  params.col(2) = Vector::Ones(M);
  expected.col(2) = Vector::Ones(M);

  EXPECT(assert_equal(expected, params.matrix()));

  // setZero
  params.setZero();
  expected.setZero();
  EXPECT(assert_equal(expected, params.matrix()));
}

//******************************************************************************
TEST(ParameterMatrix, Addition) {
  ParameterMatrix<M> params(Matrix::Ones(M, N));
  ParameterMatrix<M> expected(2 * Matrix::Ones(M, N));

  // Add vector
  EXPECT(assert_equal(expected, params + Vector::Ones(M * N)));
  // Add another ParameterMatrix
  ParameterMatrix<M> actual = params + ParameterMatrix<M>(Matrix::Ones(M, N));
  EXPECT(assert_equal(expected, actual));
}

//******************************************************************************
TEST(ParameterMatrix, Subtraction) {
  ParameterMatrix<M> params(2 * Matrix::Ones(M, N));
  ParameterMatrix<M> expected(Matrix::Ones(M, N));

  // Subtract vector
  EXPECT(assert_equal(expected, params - Vector::Ones(M * N)));
  // Subtract another ParameterMatrix
  ParameterMatrix<M> actual = params - ParameterMatrix<M>(Matrix::Ones(M, N));
  EXPECT(assert_equal(expected, actual));
}

//******************************************************************************
TEST(ParameterMatrix, Multiplication) {
  ParameterMatrix<M> params(Matrix::Ones(M, N));
  Matrix multiplier = 2 * Matrix::Ones(N, 2);
  Matrix expected = 2 * N * Matrix::Ones(M, 2);
  EXPECT(assert_equal(expected, params * multiplier));
}

//******************************************************************************
TEST(ParameterMatrix, VectorSpace) {
  ParameterMatrix<M> params(Matrix::Ones(M, N));
  // vector
  EXPECT(assert_equal(Vector::Ones(M * N), params.vector()));
  // identity
  EXPECT(assert_equal(ParameterMatrix<M>::identity(),
                      ParameterMatrix<M>(Matrix::Zero(M, 0))));
}

//******************************************************************************
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
//******************************************************************************
@@ -110,7 +110,7 @@ class GTSAM_EXPORT FunctorizedFactor : public NoiseModelFactor1<T> {
  bool equals(const NonlinearFactor &other, double tol = 1e-9) const override {
    const FunctorizedFactor<R, T> *e =
        dynamic_cast<const FunctorizedFactor<R, T> *>(&other);
    return e && Base::equals(other, tol) &&
    return e != nullptr && Base::equals(other, tol) &&
           traits<R>::Equals(this->measured_, e->measured_, tol);
  }
  /// @}
@@ -20,8 +20,12 @@
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/basis/Basis.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/factorTesting.h>

using namespace std;
@@ -60,7 +64,7 @@ class ProjectionFunctor {
    if (H1) {
      H1->resize(x.size(), A.size());
      *H1 << I_3x3, I_3x3, I_3x3;
    }
    }
    if (H2) *H2 = A;
    return A * x;
  }
@@ -255,18 +259,148 @@ TEST(FunctorizedFactor, Lambda2) {
    if (H1) {
      H1->resize(x.size(), A.size());
      *H1 << I_3x3, I_3x3, I_3x3;
    }
    }
    if (H2) *H2 = A;
    return A * x;
  };
  // FunctorizedFactor<Matrix> factor(key, measurement, model, lambda);
  auto factor = MakeFunctorizedFactor2<Matrix, Vector>(keyA, keyx, measurement, model2, lambda);
  auto factor = MakeFunctorizedFactor2<Matrix, Vector>(keyA, keyx, measurement,
                                                       model2, lambda);

  Vector error = factor.evaluateError(A, x);

  EXPECT(assert_equal(Vector::Zero(3), error, 1e-9));
}

const size_t N = 2;
//******************************************************************************
TEST(FunctorizedFactor, Print2) {
  const size_t M = 1;

  Vector measured = Vector::Ones(M) * 42;

  auto model = noiseModel::Isotropic::Sigma(M, 1.0);
  VectorEvaluationFactor<Chebyshev2, M> priorFactor(key, measured, model, N, 0);

  string expected =
      " keys = { X0 }\n"
      " noise model: unit (1) \n"
      "FunctorizedFactor(X0)\n"
      " measurement: [\n"
      " 42\n"
      "]\n"
      " noise model sigmas: 1\n";

  EXPECT(assert_print_equal(expected, priorFactor));
}

//******************************************************************************
TEST(FunctorizedFactor, VectorEvaluationFactor) {
  const size_t M = 4;

  Vector measured = Vector::Zero(M);

  auto model = noiseModel::Isotropic::Sigma(M, 1.0);
  VectorEvaluationFactor<Chebyshev2, M> priorFactor(key, measured, model, N, 0);

  NonlinearFactorGraph graph;
  graph.add(priorFactor);

  ParameterMatrix<M> stateMatrix(N);

  Values initial;
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.verbosity = NonlinearOptimizerParams::SILENT;
  parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}
//******************************************************************************
TEST(FunctorizedFactor, VectorComponentFactor) {
  const int P = 4;
  const size_t i = 2;
  const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0;
  auto model = noiseModel::Isotropic::Sigma(1, 1.0);
  VectorComponentFactor<Chebyshev2, P> controlPrior(key, measured, model, N, i,
                                                    t, a, b);

  NonlinearFactorGraph graph;
  graph.add(controlPrior);

  ParameterMatrix<P> stateMatrix(N);

  Values initial;
  initial.insert<ParameterMatrix<P>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.verbosity = NonlinearOptimizerParams::SILENT;
  parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(FunctorizedFactor, VecDerivativePrior) {
  const size_t M = 4;

  Vector measured = Vector::Zero(M);
  auto model = noiseModel::Isotropic::Sigma(M, 1.0);
  VectorDerivativeFactor<Chebyshev2, M> vecDPrior(key, measured, model, N, 0);

  NonlinearFactorGraph graph;
  graph.add(vecDPrior);

  ParameterMatrix<M> stateMatrix(N);

  Values initial;
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.verbosity = NonlinearOptimizerParams::SILENT;
  parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}
//******************************************************************************
TEST(FunctorizedFactor, ComponentDerivativeFactor) {
  const size_t M = 4;

  double measured = 0;
  auto model = noiseModel::Isotropic::Sigma(1, 1.0);
  ComponentDerivativeFactor<Chebyshev2, M> controlDPrior(key, measured, model,
                                                         N, 0, 0);

  NonlinearFactorGraph graph;
  graph.add(controlDPrior);

  Values initial;
  ParameterMatrix<M> stateMatrix(N);
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.verbosity = NonlinearOptimizerParams::SILENT;
  parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

/* ************************************************************************* */
int main() {
  TestResult tr;
@@ -61,6 +61,7 @@ set(interface_headers
    ${PROJECT_SOURCE_DIR}/gtsam/slam/slam.i
    ${PROJECT_SOURCE_DIR}/gtsam/sfm/sfm.i
    ${PROJECT_SOURCE_DIR}/gtsam/navigation/navigation.i
    ${PROJECT_SOURCE_DIR}/gtsam/basis/basis.i
)
@@ -0,0 +1,12 @@
/* Please refer to:
 * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
 * These are required to save one copy operation on Python calls.
 *
 * NOTES
 * =================
 *
 * `PYBIND11_MAKE_OPAQUE` will mark the type as "opaque" for the pybind11
 * automatic STL binding, such that the raw objects can be accessed in Python.
 * Without this they will be automatically converted to a Python object, and
 * mutations made on the Python side will not be reflected in C++.
 */
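For reference, a minimal hedged sketch of how such a specializations file is typically used (not taken from this PR; the element type std::vector<double> is only an illustration):

// Sketch: mark an STL container as opaque so pybind11 exposes the C++ object
// itself instead of converting it to (and copying it into) a Python list.
#include <pybind11/pybind11.h>
#include <vector>

// Must appear before any binding code that uses this type.
PYBIND11_MAKE_OPAQUE(std::vector<double>);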
@@ -0,0 +1,12 @@
/* Please refer to:
 * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
 * These are required to save one copy operation on Python calls.
 *
 * NOTES
 * =================
 *
 * `py::bind_vector` and similar machinery give the std container a Python-like
 * interface, but without the `<pybind11/stl.h>` copying mechanism. Combined
 * with `PYBIND11_MAKE_OPAQUE`, this allows the types to be modified from Python
 * and saves one copy operation.
 */
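A companion sketch (again not from this PR; the module name `example` and element type are placeholders) showing how the opaque declaration and `py::bind_vector` combine:

// Sketch: pair PYBIND11_MAKE_OPAQUE with py::bind_vector so Python gets a
// list-like wrapper over the same C++ vector, with no copy on conversion.
#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>
#include <vector>

PYBIND11_MAKE_OPAQUE(std::vector<double>);

PYBIND11_MODULE(example, m) {
  // Mutations made from Python (append, __setitem__, ...) are reflected in C++.
  pybind11::bind_vector<std::vector<double>>(m, "VectorDouble");
}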