Remove support for dual keys. Add .finished() after all matrix and vector comma initializers. Remove buildDualGraph from GaussianFactorGraph. Remove support for multipliedHessians for non-linear equality constraints.
parent 181bfb4f0f
commit e539738fd0
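Most of the mechanical churn in this commit comes from Eigen's comma initializer: an expression such as (Vector(3) << 1.0, 2.0, 3.0) is a CommaInitializer proxy rather than a Vector, and .finished() is what turns it back into the actual matrix or vector. A minimal illustration of the pattern the commit adopts (an illustrative sketch, not code from the patch):

    #include <Eigen/Dense>

    int main() {
      // Without .finished() the right-hand side is an Eigen::CommaInitializer,
      // which stricter Eigen/compiler combinations refuse to convert implicitly.
      Eigen::VectorXd b = (Eigen::VectorXd(3) << 1.0, 2.0, 3.0).finished();
      Eigen::MatrixXd A = (Eigen::MatrixXd(2, 2) << 1.0, 0.0,
                                                    0.0, 1.0).finished();
      return (A.rows() == 2 && b.size() == 3) ? 0 : 1;
    }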
@@ -34,6 +34,7 @@ INSTANTIATE_LIE(Pose2);
GTSAM_CONCEPT_POSE_INST(Pose2);

static const Rot2 R_PI_2(Rot2::fromCosSin(0., 1.));
static const Matrix3 I3 = eye(3);

/* ************************************************************************* */
Matrix3 Pose2::matrix() const {

@@ -160,7 +161,7 @@ Matrix3 Pose2::dexpInvL(const Vector3& v) {
  // TODO: Duplicated code with Pose3. Maybe unify them at higher Lie level?
  // Bernoulli numbers, from Wikipedia
  static const Vector B = (Vector(9) << 1.0, -1.0 / 2.0, 1. / 6., 0.0, -1.0 / 30.0,
-      0.0, 1.0 / 42.0, 0.0, -1.0 / 30);
+      0.0, 1.0 / 42.0, 0.0, -1.0 / 30).finished();
  static const int N = 5; // order of approximation
  Matrix res = I3;
  Matrix3 ad_i = I3;
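Background for the Bernoulli coefficients in the hunk above (a standard identity, not something stated in the patch): the inverse of the left-trivialized derivative of the exponential map has the series

    dexpInvL(v) = sum_{k=0..N} (B_k / k!) * ad_v^k

with B_k the Bernoulli numbers and ad_v the adjoint map of v; the static vector B together with N = 5 appears to implement a truncation of exactly this series.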
@@ -143,7 +143,7 @@ TEST(Pose2, expmap0d) {
// test case for screw motion in the plane
namespace screw {
double w=0.3;
-Vector xi = (Vector(3) << 0.0, w, w);
+Vector xi = (Vector(3) << 0.0, w, w).finished();
Rot2 expectedR = Rot2::fromAngle(w);
Point2 expectedT(-0.0446635, 0.29552);
Pose2 expected(expectedR, expectedT);

@@ -193,21 +193,21 @@ TEST(Pose2, logmap_full) {
}

/* ************************************************************************* */
-Vector w = (Vector(3) << 0.1, 0.27, -0.2);
+Vector w = (Vector(3) << 0.1, 0.27, -0.2).finished();

// Left trivialization Derivative of exp(w) over w: How exp(w) changes when w changes?
// We find y such that: exp(w) exp(y) = exp(w + dw) for dw --> 0
// => y = log (exp(-w) * exp(w+dw))
-Vector testDexpL(const LieVector& dw) {
-  Vector y = Pose2::Logmap(Pose2::Expmap(-w) * Pose2::Expmap(w + dw));
+Vector3 testDexpL(const Vector& dw) {
+  Vector3 y = Pose2::Logmap(Pose2::Expmap(-w) * Pose2::Expmap(w + dw));
  return y;
}

TEST( Pose2, dexpL) {
  Matrix actualDexpL = Pose2::dexpL(w);
-  Matrix expectedDexpL = numericalDerivative11(
-      boost::function<Vector(const LieVector&)>(
-          boost::bind(testDexpL, _1)), LieVector(zero(3)), 1e-2);
+  Matrix expectedDexpL = numericalDerivative11<Vector, Vector3>(
+      boost::function<Vector(const Vector&)>(
+          boost::bind(testDexpL, _1)), Vector(zero(3)), 1e-2);
  EXPECT(assert_equal(expectedDexpL, actualDexpL, 1e-5));

  Matrix actualDexpInvL = Pose2::dexpInvL(w);
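As the comments in this test spell out, the left-trivialized derivative is probed numerically: with w fixed, y(dw) = Logmap(Expmap(-w) * Expmap(w + dw)) vanishes at dw = 0, and

    dexpL(w) = d/d(dw) y(dw) evaluated at dw = 0

is what numericalDerivative11 approximates and compares against the analytic Pose2::dexpL(w) (and likewise for Rot2 in the next file).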
@@ -156,21 +156,21 @@ TEST( Rot2, relativeBearing )
}

/* ************************************************************************* */
-Vector w = (Vector(1) << 0.27);
+Vector w = (Vector(1) << 0.27).finished();

// Left trivialization Derivative of exp(w) over w: How exp(w) changes when w changes?
// We find y such that: exp(w) exp(y) = exp(w + dw) for dw --> 0
// => y = log (exp(-w) * exp(w+dw))
-Vector testDexpL(const LieVector& dw) {
-  Vector y = Rot2::Logmap(Rot2::Expmap(-w) * Rot2::Expmap(w + dw));
+Vector1 testDexpL(const Vector& dw) {
+  Vector1 y = Rot2::Logmap(Rot2::Expmap(-w) * Rot2::Expmap(w + dw));
  return y;
}

TEST( Rot2, dexpL) {
  Matrix actualDexpL = Rot2::dexpL(w);
-  Matrix expectedDexpL = numericalDerivative11(
-      boost::function<Vector(const LieVector&)>(
-          boost::bind(testDexpL, _1)), LieVector(zero(1)), 1e-2);
+  Matrix expectedDexpL = numericalDerivative11<Vector, Vector1>(
+      boost::function<Vector(const Vector&)>(
+          boost::bind(testDexpL, _1)), Vector(zero(1)), 1e-2);
  EXPECT(assert_equal(expectedDexpL, actualDexpL, 1e-5));

  Matrix actualDexpInvL = Rot2::dexpInvL(w);
@@ -128,7 +128,7 @@ namespace gtsam {
    virtual void multiplyHessianAdd(double alpha, const double* x, double* y) const = 0;

    /// A'*b for Jacobian, eta for Hessian
-    virtual VectorValues gradientAtZero(const boost::optional<const VectorValues&> negDuals = boost::none) const = 0;
+    virtual VectorValues gradientAtZero() const = 0;

    /// A'*b for Jacobian, eta for Hessian (raw memory version)
    virtual void gradientAtZero(double* d) const = 0;
@@ -300,13 +300,12 @@ namespace gtsam {
}

/* ************************************************************************* */
-VectorValues GaussianFactorGraph::gradientAtZero(
-    const boost::optional<const VectorValues&> negDuals) const {
+VectorValues GaussianFactorGraph::gradientAtZero() const {
  // Zero-out the gradient
  VectorValues g;
  BOOST_FOREACH(const sharedFactor& factor, *this) {
    if (!factor) continue;
-    VectorValues gi = factor->gradientAtZero(negDuals);
+    VectorValues gi = factor->gradientAtZero();
    g.addInPlace_(gi);
  }
  return g;

@@ -394,47 +393,6 @@ VectorValues GaussianFactorGraph::gradientAtZero(
  return false;
}

-/* ************************************************************************* */
-JacobianFactor::shared_ptr GaussianFactorGraph::createDualFactor(Key key,
-    const VariableIndex& variableIndex, const VectorValues& delta) const {
-  typedef GaussianFactor G;
-  typedef JacobianFactor J;
-  std::vector<std::pair<Key, Matrix> > Aterms;
-  Vector b = zero(delta.at(key).size());
-  BOOST_FOREACH(size_t factorIx, variableIndex[key]) {
-    // If it is a constraint, transpose the A matrix to have the jacobian of the dual key
-    J::shared_ptr jacobian(boost::dynamic_pointer_cast<J>(this->at(factorIx)));
-    if (jacobian && jacobian->get_model() && jacobian->get_model()->isConstrained()) {
-      Matrix Ai = jacobian->getA(jacobian->find(key)).transpose();
-      boost::optional<Key> dualKey = jacobian->dualKey();
-      if (!dualKey) {
-        throw std::runtime_error("Fail to create dual factor! " \
-            "Constrained JacobianFactor doesn't have a dual key!");
-      }
-      Aterms.push_back(make_pair(*(dualKey), Ai));
-    }
-    else {
-      // If it is unconstrained, collect the gradient to the b vector
-      G::shared_ptr factor(boost::dynamic_pointer_cast<G>(this->at(factorIx)));
-      b += factor->gradient(key, delta);
-    }
-  }
-  return boost::make_shared<JacobianFactor>(Aterms, b);
-}
-
-/* ************************************************************************* */
-GaussianFactorGraph::shared_ptr GaussianFactorGraph::buildDualGraph(
-    const KeySet& constrainedVariables,
-    const VariableIndex& variableIndex,
-    const VectorValues& delta) const {
-  GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph());
-  BOOST_FOREACH(const Key key, constrainedVariables) {
-    // Each constrained key becomes a factor in the dual graph
-    dualGraph->push_back(createDualFactor(key, variableIndex, delta));
-  }
-  return dualGraph;
-}
-
/* ************************************************************************* */
boost::tuple<GaussianFactorGraph, GaussianFactorGraph, GaussianFactorGraph> GaussianFactorGraph::splitConstraints() const {
  typedef HessianFactor H;
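For the record, a hedged reading of the code deleted above rather than a statement about the remaining API: createDualFactor assembled, for one constrained variable, the transposed constraint Jacobians A_i^T keyed on each constraint's dual key, with the summed gradients of the unconstrained factors as the right-hand side b, so each dual factor expressed the KKT stationarity condition

    sum_i A_i^T * lambda_i  ~=  grad f(x)

for that variable, and buildDualGraph collected one such factor per constrained key so that optimizing the dual graph gave least-squares estimates of the Lagrange multipliers.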
@@ -262,11 +262,10 @@ namespace gtsam {
    /**
     * Compute the gradient of the energy function, \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} A x - b
     * \right\Vert^2 \f$, centered around zero. The gradient is \f$ A^T(Ax-b) \f$.
-     * @param duals[optional] Values of dual variables to scale constrained gradients, \lambda*\nabla c(x)
     * @param [output] g A VectorValues to store the gradient, which must be preallocated,
     * see allocateVectorValues
     * @return The gradient as a VectorValues */
-    virtual VectorValues gradientAtZero(const boost::optional<const VectorValues&> duals = boost::none) const;
+    virtual VectorValues gradientAtZero() const;

    /** Optimize along the gradient direction, with a closed-form computation to perform the line
     * search. The gradient is computed about \f$ \delta x=0 \f$.

@@ -328,19 +327,6 @@ namespace gtsam {
     */
    boost::tuple<GaussianFactorGraph, GaussianFactorGraph, GaussianFactorGraph> splitConstraints() const;

-    /**
-     * Build a dual graph to estimate dual variables associated with constrained factors
-     */
-    GaussianFactorGraph::shared_ptr buildDualGraph(
-        const KeySet& constrainedVariables, const VariableIndex& variableIndex,
-        const VectorValues& delta) const;
-
-    /**
-     * Create a dual factor from a constrained key
-     */
-    JacobianFactor::shared_ptr createDualFactor(Key key,
-        const VariableIndex& variableIndex, const VectorValues& delta) const;

  private:
    /** Serialization function */
    friend class boost::serialization::access;
@@ -591,7 +591,7 @@ void HessianFactor::multiplyHessianAdd(double alpha, const double* x,


/* ************************************************************************* */
-VectorValues HessianFactor::gradientAtZero(const boost::optional<const VectorValues&> negDuals) const {
+VectorValues HessianFactor::gradientAtZero() const {
  VectorValues g;
  size_t n = size();
  for (size_t j = 0; j < n; ++j)
@@ -388,7 +388,7 @@ namespace gtsam {
     * eta for Hessian
     * Ignore duals parameters. It's only valid for constraints, which need to be a JacobianFactor
     */
-    VectorValues gradientAtZero(const boost::optional<const VectorValues&> negDuals = boost::none) const;
+    VectorValues gradientAtZero() const;

    virtual void gradientAtZero(double* d) const;

@@ -29,17 +29,15 @@ namespace gtsam {
/* ************************************************************************* */
template<typename TERMS>
JacobianFactor::JacobianFactor(const TERMS&terms, const Vector &b,
-    const SharedDiagonal& model, const boost::optional<Key>& dualKey) :
-    dualKey_(dualKey) {
+    const SharedDiagonal& model) {
  fillTerms(terms, b, model);
}

/* ************************************************************************* */
template<typename KEYS>
JacobianFactor::JacobianFactor(const KEYS& keys,
-    const VerticalBlockMatrix& augmentedMatrix, const SharedDiagonal& model,
-    const boost::optional<Key>& dualKey) :
-    Base(keys), Ab_(augmentedMatrix), dualKey_(dualKey) {
+    const VerticalBlockMatrix& augmentedMatrix, const SharedDiagonal& model) :
+    Base(keys), Ab_(augmentedMatrix) {
  // Check noise model dimension
  if (model && (DenseIndex) model->dim() != augmentedMatrix.rows())
    throw InvalidNoiseModel(augmentedMatrix.rows(), model->dim());
@@ -59,7 +59,7 @@ namespace gtsam {

/* ************************************************************************* */
JacobianFactor::JacobianFactor() :
-    Ab_(cref_list_of<1>(1), 0), dualKey_(boost::none) {
+    Ab_(cref_list_of<1>(1), 0) {
  getb().setZero();
}

@@ -77,73 +77,36 @@ JacobianFactor::JacobianFactor(const GaussianFactor& gf) {

/* ************************************************************************* */
JacobianFactor::JacobianFactor(const Vector& b_in) :
-    Ab_(cref_list_of<1>(1), b_in.size()), dualKey_(boost::none) {
+    Ab_(cref_list_of<1>(1), b_in.size()) {
  getb() = b_in;
}

/* ************************************************************************* */
JacobianFactor::JacobianFactor(Key i1, const Matrix& A1, const Vector& b,
-    const SharedDiagonal& model, const boost::optional<Key>& dualKey) :
-    dualKey_(dualKey) {
+    const SharedDiagonal& model) {
  fillTerms(cref_list_of<1>(make_pair(i1, A1)), b, model);
}

/* ************************************************************************* */
JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
-    const Matrix& A2, const Vector& b, const SharedDiagonal& model,
-    const boost::optional<Key>& dualKey) :
-    dualKey_(dualKey) {
+    const Matrix& A2, const Vector& b, const SharedDiagonal& model) {
  fillTerms(cref_list_of<2>(make_pair(i1, A1))(make_pair(i2, A2)), b, model);
}

/* ************************************************************************* */
JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
    const Matrix& A2, Key i3, const Matrix& A3, const Vector& b,
-    const SharedDiagonal& model, const boost::optional<Key>& dualKey) :
-    dualKey_(dualKey) {
+    const SharedDiagonal& model) {
  fillTerms(
      cref_list_of<3>(make_pair(i1, A1))(make_pair(i2, A2))(make_pair(i3, A3)),
      b, model);
}
-
-/* ************************************************************************* */
-JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
-    const Matrix& A2, Key i3, const Matrix& A3, Key i4, const Matrix& A4,
-    const Vector& b, const SharedDiagonal& model,
-    const boost::optional<Key>& dualKey) :
-    dualKey_(dualKey) {
-  fillTerms(
-      cref_list_of<4>(make_pair(i1, A1))(make_pair(i2, A2))(make_pair(i3, A3))(
-          make_pair(i4, A4)), b, model);
-}
-
-/* ************************************************************************* */
-JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
-    const Matrix& A2, Key i3, const Matrix& A3, Key i4, const Matrix& A4,
-    Key i5, const Matrix& A5, const Vector& b, const SharedDiagonal& model,
-    const boost::optional<Key>& dualKey) :
-    dualKey_(dualKey) {
-  fillTerms(
-      cref_list_of<5>(make_pair(i1, A1))(make_pair(i2, A2))(make_pair(i3, A3))(
-          make_pair(i4, A4))(make_pair(i5, A5)), b, model);
-}
-
-/* ************************************************************************* */
-JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
-    const Matrix& A2, Key i3, const Matrix& A3, Key i4, const Matrix& A4,
-    Key i5, const Matrix& A5, Key i6, const Matrix& A6, const Vector& b,
-    const SharedDiagonal& model, const boost::optional<Key>& dualKey) :
-    dualKey_(dualKey) {
-  fillTerms(
-      cref_list_of<6>(make_pair(i1, A1))(make_pair(i2, A2))(make_pair(i3, A3))(
-          make_pair(i4, A4))(make_pair(i5, A5))(make_pair(i6, A6)), b, model);
-}

/* ************************************************************************* */
JacobianFactor::JacobianFactor(const HessianFactor& factor) :
    Base(factor), Ab_(
        VerticalBlockMatrix::LikeActiveViewOf(factor.matrixObject(),
-            factor.rows())), dualKey_(boost::none) {
+            factor.rows())) {
  // Copy Hessian into our matrix and then do in-place Cholesky
  Ab_.full() = factor.matrixObject().full();
@@ -152,13 +115,9 @@ JacobianFactor::JacobianFactor(const HessianFactor& factor) :
  bool success;
  boost::tie(maxrank, success) = choleskyCareful(Ab_.matrix());

-  // factor.print("HessianFactor to convert: ");
-  // cout << "Maxrank: " << maxrank << ", success: " << int(success) << endl;
-
  // Check for indefinite system
-  if (!success) {
+  if (!success)
    throw IndeterminantLinearSystemException(factor.keys().front());
-  }

  // Zero out lower triangle
  Ab_.matrix().topRows(maxrank).triangularView<Eigen::StrictlyLower>() =
@@ -261,8 +220,7 @@ FastVector<JacobianFactor::shared_ptr> _convertOrCastToJacobians(
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
    boost::optional<const Ordering&> ordering,
-    boost::optional<const VariableSlots&> variableSlots) :
-    dualKey_(boost::none) {
+    boost::optional<const VariableSlots&> variableSlots) {
  gttic(JacobianFactor_combine_constructor);

  // Compute VariableSlots if one was not provided
@@ -605,43 +563,19 @@ void JacobianFactor::multiplyHessianAdd(double alpha, const double* x,
}

/* ************************************************************************* */
-VectorValues JacobianFactor::gradientAtZero(
-    const boost::optional<const VectorValues&> negDuals) const {
+VectorValues JacobianFactor::gradientAtZero() const {
  VectorValues g;

-  /* We treat linear constraints differently: They are not least square terms, 0.5*||Ax-b||^2,
-   * but simply linear constraints: Ax-b=0.
-   * The constraint function is c(x) = Ax-b. It's Jacobian is A,
-   * and the gradient is the sum of columns of A', each optionally scaled by the
-   * corresponding element of negDual vector.
-   */
-  if (isConstrained()) {
-    Vector scale = ones(rows());
-    if (negDuals && dualKey_) {
-      scale = negDuals->at(dualKey());
-      if (scale.size() != rows())
-        throw std::runtime_error(
-            "Fail to scale the gradient with the dual vector: Size mismatch!");
-    }
-    if (negDuals && !dualKey_) {
-      throw std::runtime_error(
-          "Fail to scale the gradient with the dual vector: No dual key!");
-    }
-    this->transposeMultiplyAdd(1.0, scale, g); // g = A'*scale
-  }
-  else {
  Vector b = getb();
  // Gradient is really -A'*b / sigma^2
  // transposeMultiplyAdd will divide by sigma once, so we need one more
  Vector b_sigma = model_ ? model_->whiten(b) : b;
  this->transposeMultiplyAdd(-1.0, b_sigma, g); // g -= A'*b/sigma^2
-  }
  return g;
}

/* ************************************************************************* */
void JacobianFactor::gradientAtZero(double* d) const {
-  //throw std::runtime_error("gradientAtZero not implemented for Jacobian factor");
+  throw std::runtime_error("Raw memory version of gradientAtZero not implemented for Jacobian factor");
}

/* ************************************************************************* */
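The branch that survives matches the usual least-squares identity (background note, not part of the patch):

    grad at x = 0 of (1/2) ||A x - b||^2_Sigma  =  -A^T Sigma^{-1} b

As the retained comments say, transposeMultiplyAdd already whitens once through the noise model, which is why b is whitened once more explicitly before the call with coefficient -1.0.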
@@ -719,13 +653,6 @@ void JacobianFactor::setModel(bool anyConstrained, const Vector& sigmas) {
  model_ = noiseModel::Diagonal::Sigmas(sigmas);
}

-/* ************************************************************************* */
-void JacobianFactor::setModel(const noiseModel::Diagonal::shared_ptr& model) {
-  if ((size_t) model->dim() != this->rows())
-    throw InvalidNoiseModel(this->rows(), model->dim());
-  model_ = model;
-}
-
/* ************************************************************************* */
std::pair<boost::shared_ptr<GaussianConditional>,
    boost::shared_ptr<JacobianFactor> > EliminateQR(
@@ -742,6 +669,7 @@ std::pair<boost::shared_ptr<GaussianConditional>,
  }

  // Do dense elimination
  SharedDiagonal noiseModel;
  if (jointFactor->model_)
    jointFactor->model_ = jointFactor->model_->QR(jointFactor->Ab_.matrix());
  else
@@ -784,8 +712,8 @@ GaussianConditional::shared_ptr JacobianFactor::splitConditional(
  const DenseIndex maxRemainingRows = std::min(Ab_.cols() - 1, originalRowEnd)
      - Ab_.rowStart() - frontalDim;
  const DenseIndex remainingRows =
-      model_ ? std::min(model_->sigmas().size() - frontalDim,
-          maxRemainingRows) :
+      model_ ?
+          std::min(model_->sigmas().size() - frontalDim, maxRemainingRows) :
          maxRemainingRows;
  Ab_.rowStart() += frontalDim;
  Ab_.rowEnd() = Ab_.rowStart() + remainingRows;
@@ -797,11 +725,12 @@ GaussianConditional::shared_ptr JacobianFactor::splitConditional(
  keys_.erase(begin(), begin() + nrFrontals);
  // Set sigmas with the right model
  if (model_) {
-    Vector sigmas = model_->sigmas().tail(remainingRows);
-    if (sigmas.size() > 0 && sigmas.minCoeff() == 0.0)
-      model_ = noiseModel::Constrained::MixedSigmas(sigmas);
+    if (model_->isConstrained())
+      model_ = noiseModel::Constrained::MixedSigmas(
+          model_->sigmas().tail(remainingRows));
    else
-      model_ = noiseModel::Diagonal::Sigmas(sigmas, false); // I don't want to be smart here
+      model_ = noiseModel::Diagonal::Sigmas(
+          model_->sigmas().tail(remainingRows));
    assert(model_->dim() == (size_t)Ab_.rows());
  }
  gttoc(remaining_factor);
@@ -288,9 +288,12 @@ namespace gtsam {
    /// A'*b for Jacobian
    VectorValues gradientAtZero() const;

+    /* ************************************************************************* */
    /// A'*b for Jacobian (raw memory version)
    virtual void gradientAtZero(double* d) const;

+    /// Compute the gradient wrt a key at any values
+    Vector gradient(Key key, const VectorValues& x) const;

    /** Return a whitened version of the factor, i.e. with unit diagonal noise model. */
    JacobianFactor whiten() const;
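The gradient(key, x) member declared above (and stubbed out with a runtime error in RegularImplicitSchurFactor further down) corresponds, for a Jacobian factor under the usual least-squares model, to one variable's block of the full gradient,

    grad wrt x_j of (1/2) ||A x - b||^2_Sigma  =  A_j^T Sigma^{-1} (A x - b),

stated here only as a reading aid; the patch itself shows just the declarations.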
@@ -452,22 +452,22 @@ TEST(HessianFactor, gradientAtZero)
/* ************************************************************************* */
TEST(HessianFactor, gradient)
{
-  Matrix G11 = (Matrix(1, 1) << 1);
-  Matrix G12 = (Matrix(1, 2) << 0, 0);
-  Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1);
-  Vector g1 = (Vector(1) << -7);
-  Vector g2 = (Vector(2) << -8, -9);
+  Matrix G11 = (Matrix(1, 1) << 1).finished();
+  Matrix G12 = (Matrix(1, 2) << 0, 0).finished();
+  Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1).finished();
+  Vector g1 = (Vector(1) << -7).finished();
+  Vector g2 = (Vector(2) << -8, -9).finished();
  double f = 194;

  HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f);

  // test gradient
-  Vector x0 = (Vector(1) << 3.0);
-  Vector x1 = (Vector(2) << -3.5, 7.1);
+  Vector x0 = (Vector(1) << 3.0).finished();
+  Vector x1 = (Vector(2) << -3.5, 7.1).finished();
  VectorValues x = pair_list_of<Key, Vector>(0, x0) (1, x1);

-  Vector expectedGrad0 = (Vector(1) << 10.0);
-  Vector expectedGrad1 = (Vector(2) << 4.5, 16.1);
+  Vector expectedGrad0 = (Vector(1) << 10.0).finished();
+  Vector expectedGrad1 = (Vector(2) << 4.5, 16.1).finished();
  Vector grad0 = factor.gradient(0, x);
  Vector grad1 = factor.gradient(1, x);
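A quick sanity check of the expected numbers in this test, assuming the HessianFactor convention that the stored blocks (G, g) yield a gradient of G x - g: the first block gives 1 * 3.0 - (-7) = 10.0, and the second gives (-3.5, 7.1) - (-8, -9) = (4.5, 16.1), matching expectedGrad0 and expectedGrad1.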
@@ -365,22 +365,6 @@ GaussianFactorGraph::shared_ptr NonlinearFactorGraph::linearize(const Values& li
  return linearFG;
}

-/* ************************************************************************* */
-boost::shared_ptr<GaussianFactorGraph> NonlinearFactorGraph::multipliedHessians(
-    const Values& linearizationPoint, const VectorValues& duals) const {
-  GaussianFactorGraph::shared_ptr hessianFG = boost::make_shared<GaussianFactorGraph>();
-  hessianFG->reserve(this->size());
-
-  // create multiplied Hessians for all factors
-  BOOST_FOREACH(const sharedFactor& factor, this->factors_) {
-    if(factor) {
-      (*hessianFG) += factor->multipliedHessian(linearizationPoint, duals);
-    } else
-      (*hessianFG) += GaussianFactor::shared_ptr();
-  }
-  return hessianFG;
-}
-
/* ************************************************************************* */
NonlinearFactorGraph NonlinearFactorGraph::clone() const {
  NonlinearFactorGraph result;
@@ -134,13 +134,6 @@ namespace gtsam {
   */
  boost::shared_ptr<GaussianFactorGraph> linearize(const Values& linearizationPoint) const;

-  /**
-   * Produce a graph of dual-scaled Hessians of each factor: lambda*H,
-   * used for solving nonlinear equality constraints using SQP.
-   */
-  boost::shared_ptr<GaussianFactorGraph> multipliedHessians(
-      const Values& linearizationPoint, const VectorValues& duals) const;
-
  /**
   * Clone() performs a deep-copy of the graph, including all of the factors
   */
@@ -453,6 +453,12 @@ public:
    }
  }

+  /// Gradient wrt a key at any values
+  Vector gradient(Key key, const VectorValues& x) const {
+    throw std::runtime_error("gradient for RegularImplicitSchurFactor is not implemented yet");
+  }
+

};
// RegularImplicitSchurFactor

@@ -59,29 +59,6 @@ public:
      noiseModel::Constrained::All(b.rows())) {
  }

-  /** Construct four-ary factor */
-  LinearConstraint(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
-      const Matrix& A3, Key i4, const Matrix& A4, const Vector& b) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, b,
-          noiseModel::Constrained::All(b.rows())) {
-  }
-
-  /** Construct five-ary factor */
-  LinearConstraint(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
-      const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
-      const Vector& b) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, b,
-          noiseModel::Constrained::All(b.rows())) {
-  }
-
-  /** Construct six-ary factor */
-  LinearConstraint(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
-      const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
-      Key i6, const Matrix& A6, const Vector& b) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, i6, A6, b,
-          noiseModel::Constrained::All(b.rows())) {
-  }
-
  /** Construct an n-ary factor
   * @tparam TERMS A container whose value type is std::pair<Key, Matrix>, specifying the
   * collection of keys and matrices making up the factor. */
@@ -65,29 +65,6 @@ public:
      dualKey) {
  }

-  /** Construct four-ary factor */
-  LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
-      const Matrix& A3, Key i4, const Matrix& A4, const Vector& b, Key dualKey) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, b,
-          noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
-  }
-
-  /** Construct five-ary factor */
-  LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
-      const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
-      const Vector& b, Key dualKey) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, b,
-          noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
-  }
-
-  /** Construct six-ary factor */
-  LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
-      const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
-      Key i6, const Matrix& A6, const Vector& b, Key dualKey) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, i6, A6, b,
-          noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
-  }
-
  /** Construct an n-ary factor
   * @tparam TERMS A container whose value type is std::pair<Key, Matrix>, specifying the
   * collection of keys and matrices making up the factor. */
@@ -52,44 +52,21 @@ public:

  /** Construct unary factor */
  LinearInequality(Key i1, const RowVector& A1, double b, Key dualKey) :
-      Base(i1, A1, (Vector(1) << b), noiseModel::Constrained::All(1)), dualKey_(
+      Base(i1, A1, (Vector(1) << b).finished(), noiseModel::Constrained::All(1)), dualKey_(
          dualKey) {
  }

  /** Construct binary factor */
  LinearInequality(Key i1, const RowVector& A1, Key i2, const RowVector& A2, double b,
      Key dualKey) :
-      Base(i1, A1, i2, A2, (Vector(1) << b), noiseModel::Constrained::All(1)), dualKey_(
+      Base(i1, A1, i2, A2, (Vector(1) << b).finished(), noiseModel::Constrained::All(1)), dualKey_(
          dualKey) {
  }

  /** Construct ternary factor */
  LinearInequality(Key i1, const RowVector& A1, Key i2, const RowVector& A2, Key i3,
      const RowVector& A3, double b, Key dualKey) :
-      Base(i1, A1, i2, A2, i3, A3, (Vector(1) << b),
+      Base(i1, A1, i2, A2, i3, A3, (Vector(1) << b).finished(),
          noiseModel::Constrained::All(1)), dualKey_(dualKey) {
  }

-  /** Construct four-ary factor */
-  LinearInequality(Key i1, const RowVector& A1, Key i2, const RowVector& A2, Key i3,
-      const RowVector& A3, Key i4, const RowVector& A4, double b, Key dualKey) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, (Vector(1) << b),
-          noiseModel::Constrained::All(1)), dualKey_(dualKey) {
-  }
-
-  /** Construct five-ary factor */
-  LinearInequality(Key i1, const RowVector& A1, Key i2, const RowVector& A2, Key i3,
-      const RowVector& A3, Key i4, const RowVector& A4, Key i5, const RowVector& A5,
-      double b, Key dualKey) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, (Vector(1) << b),
-          noiseModel::Constrained::All(1)), dualKey_(dualKey) {
-  }
-
-  /** Construct six-ary factor */
-  LinearInequality(Key i1, const RowVector& A1, Key i2, const RowVector& A2, Key i3,
-      const RowVector& A3, Key i4, const RowVector& A4, Key i5, const RowVector& A5,
-      Key i6, const RowVector& A6, double b, Key dualKey) :
-      Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, i6, A6, (Vector(1) << b),
-          noiseModel::Constrained::All(1)), dualKey_(dualKey) {
-  }

@@ -98,7 +75,7 @@ public:
   * collection of keys and matrices making up the factor. */
  template<typename TERMS>
  LinearInequality(const TERMS& terms, double b, Key dualKey) :
-      Base(terms, (Vector(1) << b), noiseModel::Constrained::All(1)), dualKey_(
+      Base(terms, (Vector(1) << b).finished(), noiseModel::Constrained::All(1)), dualKey_(
          dualKey) {
  }

@@ -21,16 +21,25 @@ TEST(LPSolver, oneD) {
  objCoeffs.insert(Y(1), repeat(1, -5.0));
  objCoeffs.insert(X(2), repeat(1, -4.0));
  objCoeffs.insert(X(3), repeat(1, -6.0));
-  LinearInequality inequality(
-      Y(1), (Matrix(3,1)<< 1,3,3),
-      X(2), (Matrix(3,1)<< -1,2,2),
-      X(3), (Matrix(3,1)<< 1,4,0), (Vector(3)<<20,42,30), 0);
+  LinearInequality inequality1(
+      Y(1), (Matrix(1,1)<< 1).finished(),
+      X(2), (Matrix(1,1)<< -1).finished(),
+      X(3), (Matrix(1,1)<< 1).finished(), 20, 0);
+  LinearInequality inequality2(
+      Y(1), (Matrix(1,1)<< 3).finished(),
+      X(2), (Matrix(1,1)<< 2).finished(),
+      X(3), (Matrix(1,1)<< 4).finished(), 42, 1);
+  LinearInequality inequality3(
+      Y(1), (Matrix(1,1)<< 3).finished(),
+      X(2), (Matrix(1,1)<< 2).finished(), 30, 2);
  VectorValues lowerBounds;
  lowerBounds.insert(Y(1), zero(1));
  lowerBounds.insert(X(2), zero(1));
  lowerBounds.insert(X(3), zero(1));
  LinearInequalityFactorGraph::shared_ptr inequalities(new LinearInequalityFactorGraph);
-  inequalities->push_back(inequality);
+  inequalities->push_back(inequality1);
+  inequalities->push_back(inequality2);
+  inequalities->push_back(inequality3);
  LinearEqualityFactorGraph::shared_ptr equalities(new LinearEqualityFactorGraph);

  LPSolver solver(objCoeffs, equalities, inequalities, lowerBounds);

@@ -64,14 +73,20 @@ TEST(LPSolver, oneD) {

TEST(LPSolver, threeD) {
  VectorValues objCoeffs;
-  objCoeffs.insert(X(1), (Vector(3)<<-5.0, -4.0, -6.0));
-  LinearInequality inequality(
-      X(1), (Matrix(3,3)<< 1,-1,1,3,2,4,3,2,0), (Vector(3)<<20,42,30), 0);
+  objCoeffs.insert(X(1), (Vector(3)<<-5.0, -4.0, -6.0).finished());
+  LinearInequality inequality1(
+      X(1), (Matrix(1,3)<< 1,-1,1).finished(), 20, 0);
+  LinearInequality inequality2(
+      X(1), (Matrix(1,3)<< 3,2,4).finished(), 42, 1);
+  LinearInequality inequality3(
+      X(1), (Matrix(1,3)<< 3,2,0).finished(), 30, 2);
  VectorValues lowerBounds;
  lowerBounds.insert(X(1), zeros(3,1));

  LinearInequalityFactorGraph::shared_ptr inequalities(new LinearInequalityFactorGraph);
-  inequalities->push_back(inequality);
+  inequalities->push_back(inequality1);
+  inequalities->push_back(inequality2);
+  inequalities->push_back(inequality3);
  LinearEqualityFactorGraph::shared_ptr equalities(new LinearEqualityFactorGraph);

  LPSolver solver(objCoeffs, equalities, inequalities, lowerBounds);

@@ -95,7 +110,7 @@ TEST(LPSolver, threeD) {

  VectorValues solution = solver.solve();
  VectorValues expectedSolution;
-  expectedSolution.insert(X(1), (Vector(3)<<0.0, 15, 3));
+  expectedSolution.insert(X(1), (Vector(3)<<0.0, 15, 3).finished());
  EXPECT(assert_equal(expectedSolution, solution));
}
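The rewritten tests encode the same small LP one scalar row at a time (assuming LinearInequality(key, a, ..., b, dualKey) represents a^T x <= b): minimize -5*y1 - 4*x2 - 6*x3 subject to

    y1 - x2 + x3 <= 20,   3*y1 + 2*x2 + 4*x3 <= 42,   3*y1 + 2*x2 <= 30,   y1, x2, x3 >= 0,

with optimum (y1, x2, x3) = (0, 15, 3); the threeD variant is the identical problem with the three scalars packed into a single 3-vector X(1).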
@@ -39,7 +39,7 @@ namespace {
      (make_pair(15, 3*Matrix3::Identity()));

  // RHS and sigmas
-  const Vector b = (Vector(3) << 1., 2., 3.);
+  const Vector b = (Vector(3) << 1., 2., 3.).finished();
  const SharedDiagonal noise = noiseModel::Constrained::All(3);
}
}
@@ -96,8 +96,8 @@ TEST(LinearConstraint, Hessian_conversion) {
      1.57, 2.695, -1.1, -2.35,
      2.695, 11.3125, -0.65, -10.225,
      -1.1, -0.65, 1, 0.5,
-      -2.35, -10.225, 0.5, 9.25),
-      (Vector(4) << -7.885, -28.5175, 2.75, 25.675),
+      -2.35, -10.225, 0.5, 9.25).finished(),
+      (Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(),
      73.1725);

  try {
@@ -187,22 +187,22 @@ TEST(LinearConstraint, matrices)
TEST(LinearConstraint, operators )
{
  Matrix I = eye(2);
-  Vector b = (Vector(2) << 0.2,-0.1);
+  Vector b = (Vector(2) << 0.2,-0.1).finished();
  LinearConstraint lf(1, -I, 2, I, b);

  VectorValues c;
-  c.insert(1, (Vector(2) << 10.,20.));
-  c.insert(2, (Vector(2) << 30.,60.));
+  c.insert(1, (Vector(2) << 10.,20.).finished());
+  c.insert(2, (Vector(2) << 30.,60.).finished());

  // test A*x
-  Vector expectedE = (Vector(2) << 20.,40.);
+  Vector expectedE = (Vector(2) << 20.,40.).finished();
  Vector actualE = lf * c;
  EXPECT(assert_equal(expectedE, actualE));

  // test A^e
  VectorValues expectedX;
-  expectedX.insert(1, (Vector(2) << -20.,-40.));
-  expectedX.insert(2, (Vector(2) << 20., 40.));
+  expectedX.insert(1, (Vector(2) << -20.,-40.).finished());
+  expectedX.insert(2, (Vector(2) << 20., 40.).finished());
  VectorValues actualX = VectorValues::Zero(expectedX);
  lf.transposeMultiplyAdd(1.0, actualE, actualX);
  EXPECT(assert_equal(expectedX, actualX));

@@ -210,8 +210,8 @@ TEST(LinearConstraint, operators )
  // test gradient at zero
  Matrix A; Vector b2; boost::tie(A,b2) = lf.jacobian();
  VectorValues expectedG;
-  expectedG.insert(1, (Vector(2) << -1,-1));
-  expectedG.insert(2, (Vector(2) << 1, 1));
+  expectedG.insert(1, (Vector(2) << -1,-1).finished());
+  expectedG.insert(2, (Vector(2) << 1, 1).finished());
  VectorValues actualG = lf.gradientAtZero();
  EXPECT(assert_equal(expectedG, actualG));
}
@@ -66,18 +66,18 @@ TEST(QPSolver, constraintsAux) {
  QPSolver solver(qp);

  VectorValues lambdas;
-  lambdas.insert(0, (Vector(1) << -0.5));
-  lambdas.insert(1, (Vector(1) << 0.0));
-  lambdas.insert(2, (Vector(1) << 0.3));
-  lambdas.insert(3, (Vector(1) << 0.1));
+  lambdas.insert(0, (Vector(1) << -0.5).finished());
+  lambdas.insert(1, (Vector(1) << 0.0).finished());
+  lambdas.insert(2, (Vector(1) << 0.3).finished());
+  lambdas.insert(3, (Vector(1) << 0.1).finished());
  int factorIx = solver.identifyLeavingConstraint(qp.inequalities, lambdas);
  LONGS_EQUAL(2, factorIx);

  VectorValues lambdas2;
-  lambdas2.insert(0, (Vector(1) << -0.5));
-  lambdas2.insert(1, (Vector(1) << 0.0));
-  lambdas2.insert(2, (Vector(1) << -0.3));
-  lambdas2.insert(3, (Vector(1) << -0.1));
+  lambdas2.insert(0, (Vector(1) << -0.5).finished());
+  lambdas2.insert(1, (Vector(1) << 0.0).finished());
+  lambdas2.insert(2, (Vector(1) << -0.3).finished());
+  lambdas2.insert(3, (Vector(1) << -0.1).finished());
  int factorIx2 = solver.identifyLeavingConstraint(qp.inequalities, lambdas2);
  LONGS_EQUAL(-1, factorIx2);
}

@@ -97,9 +97,9 @@ QP createEqualityConstrainedTest() {

  // Equality constraints
  // x1 + x2 = 1 --> x1 + x2 -1 = 0, hence we negate the b vector
-  Matrix A1 = (Matrix(1, 1) << 1);
-  Matrix A2 = (Matrix(1, 1) << 1);
-  Vector b = -(Vector(1) << 1);
+  Matrix A1 = (Matrix(1, 1) << 1).finished();
+  Matrix A2 = (Matrix(1, 1) << 1).finished();
+  Vector b = -(Vector(1) << 1).finished();
  qp.equalities.push_back(LinearEquality(X(1), A1, X(2), A2, b, 0));

  return qp;

@@ -119,7 +119,7 @@ TEST(QPSolver, dual) {
      qp.inequalities, initialValues);
  VectorValues dual = dualGraph->optimize();
  VectorValues expectedDual;
-  expectedDual.insert(0, (Vector(1) << 2.0));
+  expectedDual.insert(0, (Vector(1) << 2.0).finished());
  CHECK(assert_equal(expectedDual, dual, 1e-10));
}

@@ -142,8 +142,8 @@ TEST(QPSolver, indentifyActiveConstraints) {

  VectorValues solution = solver.solveWithCurrentWorkingSet(workingSet);
  VectorValues expectedSolution;
-  expectedSolution.insert(X(1), (Vector(1) << 0.0));
-  expectedSolution.insert(X(2), (Vector(1) << 0.0));
+  expectedSolution.insert(X(1), (Vector(1) << 0.0).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 0.0).finished());
  CHECK(assert_equal(expectedSolution, solution, 1e-100));

}

@@ -158,20 +158,20 @@ TEST(QPSolver, iterate) {
  currentSolution.insert(X(2), zero(1));

  std::vector<VectorValues> expectedSolutions(4), expectedDuals(4);
-  expectedSolutions[0].insert(X(1), (Vector(1) << 0.0));
-  expectedSolutions[0].insert(X(2), (Vector(1) << 0.0));
-  expectedDuals[0].insert(1, (Vector(1) << 3));
-  expectedDuals[0].insert(2, (Vector(1) << 0));
+  expectedSolutions[0].insert(X(1), (Vector(1) << 0.0).finished());
+  expectedSolutions[0].insert(X(2), (Vector(1) << 0.0).finished());
+  expectedDuals[0].insert(1, (Vector(1) << 3).finished());
+  expectedDuals[0].insert(2, (Vector(1) << 0).finished());

-  expectedSolutions[1].insert(X(1), (Vector(1) << 1.5));
-  expectedSolutions[1].insert(X(2), (Vector(1) << 0.0));
-  expectedDuals[1].insert(3, (Vector(1) << 1.5));
+  expectedSolutions[1].insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolutions[1].insert(X(2), (Vector(1) << 0.0).finished());
+  expectedDuals[1].insert(3, (Vector(1) << 1.5).finished());

-  expectedSolutions[2].insert(X(1), (Vector(1) << 1.5));
-  expectedSolutions[2].insert(X(2), (Vector(1) << 0.75));
+  expectedSolutions[2].insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolutions[2].insert(X(2), (Vector(1) << 0.75).finished());

-  expectedSolutions[3].insert(X(1), (Vector(1) << 1.5));
-  expectedSolutions[3].insert(X(2), (Vector(1) << 0.5));
+  expectedSolutions[3].insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolutions[3].insert(X(2), (Vector(1) << 0.5).finished());

  LinearInequalityFactorGraph workingSet =
      solver.identifyActiveConstraints(qp.inequalities, currentSolution);

@@ -204,8 +204,8 @@ TEST(QPSolver, optimizeForst10book_pg171Ex5) {
  VectorValues solution;
  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
  VectorValues expectedSolution;
-  expectedSolution.insert(X(1), (Vector(1) << 1.5));
-  expectedSolution.insert(X(2), (Vector(1) << 0.5));
+  expectedSolution.insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 0.5).finished());
  CHECK(assert_equal(expectedSolution, solution, 1e-100));
}

@@ -241,8 +241,8 @@ TEST(QPSolver, optimizeMatlabEx) {
  VectorValues solution;
  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
  VectorValues expectedSolution;
-  expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0));
-  expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0));
+  expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished());
  CHECK(assert_equal(expectedSolution, solution, 1e-7));
}

@@ -268,14 +268,14 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4) {
  QP qp = createTestNocedal06bookEx16_4();
  QPSolver solver(qp);
  VectorValues initialValues;
-  initialValues.insert(X(1), (Vector(1) << 2.0));
+  initialValues.insert(X(1), (Vector(1) << 2.0).finished());
  initialValues.insert(X(2), zero(1));

  VectorValues solution;
  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
  VectorValues expectedSolution;
-  expectedSolution.insert(X(1), (Vector(1) << 1.4));
-  expectedSolution.insert(X(2), (Vector(1) << 1.7));
+  expectedSolution.insert(X(1), (Vector(1) << 1.4).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 1.7).finished());
  CHECK(assert_equal(expectedSolution, solution, 1e-7));
}

@@ -310,7 +310,7 @@ QP modifyNocedal06bookEx16_4() {

  // Inequality constraints
  noiseModel::Constrained::shared_ptr noise =
-      noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
+      noiseModel::Constrained::MixedSigmas((Vector(1) << -1).finished());
  qp.inequalities.push_back(LinearInequality(X(1), -One, X(2), 2 * One, -1, 0));
  qp.inequalities.push_back(LinearInequality(X(1), One, X(2), 2 * One, 6, 1));
  qp.inequalities.push_back(LinearInequality(X(1), One, X(2), -2 * One, 2, 2));

@@ -376,12 +376,12 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4_2) {
  QP qp = createTestNocedal06bookEx16_4();
  QPSolver solver(qp);
  VectorValues initialValues;
-  initialValues.insert(X(1), (Vector(1) << 0.0));
-  initialValues.insert(X(2), (Vector(1) << 100.0));
+  initialValues.insert(X(1), (Vector(1) << 0.0).finished());
+  initialValues.insert(X(2), (Vector(1) << 100.0).finished());

  VectorValues expectedSolution;
-  expectedSolution.insert(X(1), (Vector(1) << 1.4));
-  expectedSolution.insert(X(2), (Vector(1) << 1.7));
+  expectedSolution.insert(X(1), (Vector(1) << 1.4).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 1.7).finished());

  VectorValues solution;
  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);

@@ -400,10 +400,10 @@ TEST(QPSolver, failedSubproblem) {
  qp.cost.push_back(JacobianFactor(X(1), eye(2), zero(2)));
  qp.cost.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0));
  qp.inequalities.push_back(
-      LinearInequality(X(1), (Matrix(1,2) << -1.0, 0.0), -1.0, 0));
+      LinearInequality(X(1), (Matrix(1,2) << -1.0, 0.0).finished(), -1.0, 0));

  VectorValues expected;
-  expected.insert(X(1), (Vector(2) << 1.0, 0.0));
+  expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished());

  QPSolver solver(qp);
  VectorValues solution;

@@ -121,4 +121,3 @@ int main()
  TestResult tr; return TestRegistry::runAllTests(tr);
}
-/* ************************************************************************* */
@@ -166,7 +166,7 @@ static SharedDiagonal sigma0_2 = noiseModel::Isotropic::Sigma(2,0.2);
static SharedDiagonal constraintModel = noiseModel::Constrained::All(2);

static const Key _l1_=0, _x1_=1, _x2_=2;
-static const Key _x_=0, _y_=1, _z_=2, _d_=3, _e_=4;
+static const Key _x_=0, _y_=1, _z_=2;
} // \namespace impl


@@ -423,7 +423,7 @@ inline GaussianFactorGraph createSimpleConstraintGraph() {
  Matrix Ay1 = eye(2) * -1;
  Vector b2 = Vector2(0.0, 0.0);
  JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
-      constraintModel, _d_));
+      constraintModel));

  // construct the graph
  GaussianFactorGraph fg;

@@ -469,7 +469,7 @@ inline GaussianFactorGraph createSingleConstraintGraph() {
  Matrix Ay1 = eye(2) * 10;
  Vector b2 = Vector2(1.0, 2.0);
  JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
-      constraintModel, _d_));
+      constraintModel));

  // construct the graph
  GaussianFactorGraph fg;

@@ -513,7 +513,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
  b1(0) = 1.0;
  b1(1) = 2.0;
  JacobianFactor::shared_ptr lc1(new JacobianFactor(_x_, A11, _y_, A12, b1,
-      constraintModel, _d_));
+      constraintModel));

  // constraint 2
  Matrix A21(2, 2);

@@ -532,7 +532,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
  b2(0) = 3.0;
  b2(1) = 4.0;
  JacobianFactor::shared_ptr lc2(new JacobianFactor(_x_, A21, _z_, A22, b2,
-      constraintModel, _e_));
+      constraintModel));

  // construct the graph
  GaussianFactorGraph fg;
@@ -593,54 +593,54 @@ TEST( GaussianFactorGraph, conditional_sigma_failure) {
}
}

-/* ************************************************************************* */
-TEST( GaussianFactorGraph, buildDualGraph1 )
-{
-  GaussianFactorGraph fgc1 = createSimpleConstraintGraph();
-  KeySet constrainedVariables1 = list_of(0)(1);
-  VariableIndex variableIndex1(fgc1);
-  VectorValues delta1 = createSimpleConstraintValues();
-  GaussianFactorGraph::shared_ptr dualGraph1 = fgc1.buildDualGraph(
-      constrainedVariables1, variableIndex1, delta1);
-  GaussianFactorGraph expectedDualGraph1;
-  expectedDualGraph1.push_back(JacobianFactor(3, eye(2), zero(2)));
-  expectedDualGraph1.push_back(JacobianFactor(3, -eye(2), zero(2)));
-  EXPECT(assert_equal(expectedDualGraph1, *dualGraph1));
-}
-
-/* ************************************************************************* */
-TEST( GaussianFactorGraph, buildDualGraph2 )
-{
-  GaussianFactorGraph fgc2 = createSingleConstraintGraph();
-  KeySet constrainedVariables2 = list_of(0)(1);
-  VariableIndex variableIndex2(fgc2);
-  VectorValues delta2 = createSingleConstraintValues();
-  GaussianFactorGraph::shared_ptr dualGraph2 = fgc2.buildDualGraph(
-      constrainedVariables2, variableIndex2, delta2);
-  GaussianFactorGraph expectedDualGraph2;
-  expectedDualGraph2.push_back(JacobianFactor(3, (Matrix(2,2) << 1,2,2,1), zero(2)));
-  expectedDualGraph2.push_back(JacobianFactor(3, 10*eye(2), zero(2)));
-  EXPECT(assert_equal(expectedDualGraph2, *dualGraph2));
-}
-
-/* ************************************************************************* */
-TEST( GaussianFactorGraph, buildDualGraph3 )
-{
-  GaussianFactorGraph fgc3 = createMultiConstraintGraph();
-  KeySet constrainedVariables3 = list_of(0)(1)(2);
-  VariableIndex variableIndex3(fgc3);
-  VectorValues delta3 = createMultiConstraintValues();
-  GaussianFactorGraph::shared_ptr dualGraph3 = fgc3.buildDualGraph(
-      constrainedVariables3, variableIndex3, delta3);
-  GaussianFactorGraph expectedDualGraph3;
-  expectedDualGraph3.push_back(
-      JacobianFactor(3, (Matrix(2, 2) << 1, 2, 2, 1), 4,
-          (Matrix(2, 2) << 3, -1, 4, -2), zero(2)));
-  expectedDualGraph3.push_back(JacobianFactor(3, 10*eye(2), zero(2)));
-  expectedDualGraph3.push_back(
-      JacobianFactor(4, (Matrix(2, 2) << 1, 1, 1, 2), zero(2)));
-  EXPECT(assert_equal(expectedDualGraph3, *dualGraph3));
-}
+///* ************************************************************************* */
+//TEST( GaussianFactorGraph, buildDualGraph1 )
+//{
+//  GaussianFactorGraph fgc1 = createSimpleConstraintGraph();
+//  KeySet constrainedVariables1 = list_of(0)(1);
+//  VariableIndex variableIndex1(fgc1);
+//  VectorValues delta1 = createSimpleConstraintValues();
+//  GaussianFactorGraph::shared_ptr dualGraph1 = fgc1.buildDualGraph(
+//      constrainedVariables1, variableIndex1, delta1);
+//  GaussianFactorGraph expectedDualGraph1;
+//  expectedDualGraph1.push_back(JacobianFactor(3, eye(2), zero(2)));
+//  expectedDualGraph1.push_back(JacobianFactor(3, -eye(2), zero(2)));
+//  EXPECT(assert_equal(expectedDualGraph1, *dualGraph1));
+//}
+//
+///* ************************************************************************* */
+//TEST( GaussianFactorGraph, buildDualGraph2 )
+//{
+//  GaussianFactorGraph fgc2 = createSingleConstraintGraph();
+//  KeySet constrainedVariables2 = list_of(0)(1);
+//  VariableIndex variableIndex2(fgc2);
+//  VectorValues delta2 = createSingleConstraintValues();
+//  GaussianFactorGraph::shared_ptr dualGraph2 = fgc2.buildDualGraph(
+//      constrainedVariables2, variableIndex2, delta2);
+//  GaussianFactorGraph expectedDualGraph2;
+//  expectedDualGraph2.push_back(JacobianFactor(3, (Matrix(2,2) << 1,2,2,1), zero(2)));
+//  expectedDualGraph2.push_back(JacobianFactor(3, 10*eye(2), zero(2)));
+//  EXPECT(assert_equal(expectedDualGraph2, *dualGraph2));
+//}
+//
+///* ************************************************************************* */
+//TEST( GaussianFactorGraph, buildDualGraph3 )
+//{
+//  GaussianFactorGraph fgc3 = createMultiConstraintGraph();
+//  KeySet constrainedVariables3 = list_of(0)(1)(2);
+//  VariableIndex variableIndex3(fgc3);
+//  VectorValues delta3 = createMultiConstraintValues();
+//  GaussianFactorGraph::shared_ptr dualGraph3 = fgc3.buildDualGraph(
+//      constrainedVariables3, variableIndex3, delta3);
+//  GaussianFactorGraph expectedDualGraph3;
+//  expectedDualGraph3.push_back(
+//      JacobianFactor(3, (Matrix(2, 2) << 1, 2, 2, 1), 4,
+//          (Matrix(2, 2) << 3, -1, 4, -2), zero(2)));
+//  expectedDualGraph3.push_back(JacobianFactor(3, 10*eye(2), zero(2)));
+//  expectedDualGraph3.push_back(
+//      JacobianFactor(4, (Matrix(2, 2) << 1, 1, 1, 2), zero(2)));
+//  EXPECT(assert_equal(expectedDualGraph3, *dualGraph3));
+//}

/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}