refactor QPSolver in progress... Compiled but tests failed.

release/4.3a0
thduynguyen 2014-12-09 06:13:57 -05:00
parent 8c4705b905
commit 001794ac84
10 changed files with 912 additions and 588 deletions

gtsam_unstable/linear/LPSolver.cpp

@@ -28,21 +28,30 @@ void LPSolver::buildMetaInformation() {
     freeVars_.insert(key);
   }
   // Now collect remaining keys in constraints
-  VariableIndex factorIndex(*constraints_);
+  VariableIndex factorIndex(*equalities_);
   BOOST_FOREACH(Key key, factorIndex | boost::adaptors::map_keys) {
     if (!variableColumnNo_.count(key)) {
-      JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<
-          JacobianFactor>(constraints_->at(*factorIndex[key].begin()));
-      if (!jacobian || !jacobian->isConstrained()) {
-        throw runtime_error("Invalid constrained graph!");
-      }
-      size_t dim = jacobian->getDim(jacobian->find(key));
+      LinearEquality::shared_ptr factor = equalities_->at(*factorIndex[key].begin());
+      size_t dim = factor->getDim(factor->find(key));
       variableColumnNo_.insert(make_pair(key, firstVarIndex));
       variableDims_.insert(make_pair(key, dim));
       firstVarIndex += variableDims_[key];
       freeVars_.insert(key);
     }
   }
+  VariableIndex factorIndex2(*inequalities_);
+  BOOST_FOREACH(Key key, factorIndex2 | boost::adaptors::map_keys) {
+    if (!variableColumnNo_.count(key)) {
+      LinearInequality::shared_ptr factor = inequalities_->at(*factorIndex2[key].begin());
+      size_t dim = factor->getDim(factor->find(key));
+      variableColumnNo_.insert(make_pair(key, firstVarIndex));
+      variableDims_.insert(make_pair(key, dim));
+      firstVarIndex += variableDims_[key];
+      freeVars_.insert(key);
+    }
+  }
   // Collect the remaining keys in lowerBounds
   BOOST_FOREACH(Key key, lowerBounds_ | boost::adaptors::map_keys) {
     if (!variableColumnNo_.count(key)) {
@@ -67,7 +76,7 @@ void LPSolver::buildMetaInformation() {
 /* ************************************************************************* */
 void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
-    const JacobianFactor::shared_ptr& jacobian) const {
+    const JacobianFactor::shared_ptr& jacobian, int constraintType) const {
   if (!jacobian || !jacobian->isConstrained())
     throw runtime_error("LP only accepts constrained factors!");
@@ -76,7 +85,6 @@ void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
   vector<int> columnNo = buildColumnNo(keys);
   // Add each row to lp one by one. TODO: is there a faster way?
-  Vector sigmas = jacobian->get_model()->sigmas();
   Matrix A = jacobian->getA();
   Vector b = jacobian->getb();
   for (int i = 0; i < A.rows(); ++i) {
@@ -88,11 +96,6 @@ void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
     // so we have to make a new copy for every row!!!!!
     vector<int> columnNoCopy(columnNo);
-    if (sigmas[i] > 0) {
-      cout << "Warning: Ignore Gaussian noise (sigma>0) in LP constraints!"
-          << endl;
-    }
-    int constraintType = (sigmas[i] < 0) ? LE : EQ;
     if (!add_constraintex(lp.get(), columnNoCopy.size(), r.data(),
         columnNoCopy.data(), constraintType, b[i]))
       throw runtime_error("LP can't accept Gaussian noise!");
@@ -132,13 +135,17 @@ boost::shared_ptr<lprec> LPSolver::buildModel() const {
   // Makes building the model faster if it is done rows by row
   set_add_rowmode(lp.get(), TRUE);
-  // Add constraints
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, *constraints_) {
-    JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<
-        JacobianFactor>(factor);
-    addConstraints(lp, jacobian);
+  // Add equality constraints
+  BOOST_FOREACH(const LinearEquality::shared_ptr& factor, *equalities_) {
+    addConstraints(lp, factor, EQ);
   }
+  // Add inequality constraints
+  BOOST_FOREACH(const LinearInequality::shared_ptr& factor, *inequalities_) {
+    addConstraints(lp, factor, LE);
+  }
   // Add bounds
   addBounds(lp);

gtsam_unstable/linear/LPSolver.h

@@ -7,9 +7,10 @@
 #pragma once
-#include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/linear/VectorValues.h>
 #include <gtsam/inference/Symbol.h>
+#include <gtsam_unstable/linear/LinearEqualityFactorGraph.h>
+#include <gtsam_unstable/linear/LinearInequalityFactorGraph.h>
 #include <gtsam/3rdparty/lp_solve_5.5/lp_lib.h>
@@ -25,7 +26,8 @@ namespace gtsam {
  */
 class LPSolver {
   VectorValues objectiveCoeffs_;
-  GaussianFactorGraph::shared_ptr constraints_;
+  LinearEqualityFactorGraph::shared_ptr equalities_;
+  LinearInequalityFactorGraph::shared_ptr inequalities_;
   VectorValues lowerBounds_, upperBounds_;
   std::map<Key, size_t> variableColumnNo_, variableDims_;
   size_t nrColumns_;
@@ -38,11 +40,12 @@ public:
   * set as unbounded, i.e. -inf <= x <= inf.
   */
  LPSolver(const VectorValues& objectiveCoeffs,
-      const GaussianFactorGraph::shared_ptr& constraints,
+      const LinearEqualityFactorGraph::shared_ptr& equalities,
+      const LinearInequalityFactorGraph::shared_ptr& inequalities,
       const VectorValues& lowerBounds = VectorValues(),
       const VectorValues& upperBounds = VectorValues()) :
-      objectiveCoeffs_(objectiveCoeffs), constraints_(constraints), lowerBounds_(
-          lowerBounds), upperBounds_(upperBounds) {
+      objectiveCoeffs_(objectiveCoeffs), equalities_(equalities), inequalities_(
+          inequalities), lowerBounds_(lowerBounds), upperBounds_(upperBounds) {
     buildMetaInformation();
   }
@@ -73,7 +76,7 @@ public:
   template<class KEYLIST>
   std::vector<int> buildColumnNo(const KEYLIST& keyList) const {
     std::vector<int> columnNo;
-    BOOST_FOREACH(Key key, keyList) {
+    BOOST_FOREACH(Key key, keyList){
       std::vector<int> varIndices = boost::copy_range<std::vector<int> >(
           boost::irange(variableColumnNo_.at(key),
               variableColumnNo_.at(key) + variableDims_.at(key)));
@@ -84,7 +87,7 @@ public:
   /// Add all [scalar] constraints in a constrained Jacobian factor to lp
   void addConstraints(const boost::shared_ptr<lprec>& lp,
-      const JacobianFactor::shared_ptr& jacobian) const;
+      const JacobianFactor::shared_ptr& jacobian, int type) const;
   /**
    * Add all bounds to lp.
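For orientation, here is a minimal usage sketch of the refactored LPSolver interface. It is not part of this commit: the toy problem, the variable keys (1, 2) and the dual keys (100, 101) are invented, and LPSolver::solve() is assumed to exist as used by QPSolver::findFeasibleInitialValues below.

#include <gtsam_unstable/linear/LPSolver.h>

using namespace gtsam;

int main() {
  // Toy LP: minimize x1 + x2 subject to x1 + x2 = 1, x1 - x2 <= 0, x1 >= 0, x2 >= 0.
  VectorValues objective;
  objective.insert(1, ones(1));  // coefficient of x1
  objective.insert(2, ones(1));  // coefficient of x2
  LinearEqualityFactorGraph::shared_ptr equalities(new LinearEqualityFactorGraph());
  equalities->push_back(LinearEquality(1, eye(1), 2, eye(1), ones(1), 100));       // x1 + x2 = 1
  LinearInequalityFactorGraph::shared_ptr inequalities(new LinearInequalityFactorGraph());
  inequalities->push_back(LinearInequality(1, eye(1), 2, -eye(1), zero(1), 101));  // x1 - x2 <= 0
  VectorValues lowerBounds;
  lowerBounds.insert(1, zero(1));
  lowerBounds.insert(2, zero(1));
  LPSolver solver(objective, equalities, inequalities, lowerBounds);
  VectorValues solution = solver.solve();
  solution.print("LP solution: ");
  return 0;
}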

gtsam_unstable/linear/LinearEquality.h

@@ -0,0 +1,140 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/*
* LinearEquality.h
* @brief: LinearEquality derived from Base with constrained noise model
* @date: Nov 27, 2014
* @author: thduynguyen
*/
#pragma once
#include <gtsam/linear/JacobianFactor.h>
namespace gtsam {
/**
* This class defines linear constraints by inheriting from Base
* with the special Constrained noise model
*/
class LinearEquality: public JacobianFactor {
public:
typedef LinearEquality This; ///< Typedef to this class
typedef JacobianFactor Base; ///< Typedef to base class
typedef boost::shared_ptr<This> shared_ptr; ///< shared_ptr to this class
private:
Key dualKey_;
public:
/** default constructor for I/O */
LinearEquality() :
Base() {
}
/** Conversion from HessianFactor (does Cholesky to obtain Jacobian matrix) */
explicit LinearEquality(const HessianFactor& hf) {
throw std::runtime_error("Cannot convert HessianFactor to LinearEquality");
}
/** Construct unary factor */
LinearEquality(Key i1, const Matrix& A1, const Vector& b, Key dualKey) :
Base(i1, A1, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct binary factor */
LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2,
const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, b, noiseModel::Constrained::All(b.rows())), dualKey_(
dualKey) {
}
/** Construct ternary factor */
LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, b, noiseModel::Constrained::All(b.rows())), dualKey_(
dualKey) {
}
/** Construct four-ary factor */
LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, Key i4, const Matrix& A4, const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, i4, A4, b,
noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct five-ary factor */
LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, b,
noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct six-ary factor */
LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
Key i6, const Matrix& A6, const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, i6, A6, b,
noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct an n-ary factor
* @tparam TERMS A container whose value type is std::pair<Key, Matrix>, specifying the
* collection of keys and matrices making up the factor. */
template<typename TERMS>
LinearEquality(const TERMS& terms, const Vector& b, Key dualKey) :
Base(terms, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Virtual destructor */
virtual ~LinearEquality() {
}
/** equals */
virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const {
return Base::equals(lf, tol);
}
/** print */
virtual void print(const std::string& s = "", const KeyFormatter& formatter =
DefaultKeyFormatter) const {
Base::print(s, formatter);
}
/** Clone this LinearEquality */
virtual GaussianFactor::shared_ptr clone() const {
return boost::static_pointer_cast<GaussianFactor>(
boost::make_shared<LinearEquality>(*this));
}
/// dual key
Key dualKey() const { return dualKey_; }
/** Special error_vector for constraints (A*x-b) */
Vector error_vector(const VectorValues& c) const {
return unweighted_error(c);
}
/** Special error for constraints.
* I think it should be zero, as this function is meant for objective cost.
* But the name "error" can be misleading.
* TODO: confirm with Frank!! */
virtual double error(const VectorValues& c) const {
return 0.0;
}
};
// LinearEquality
}// gtsam
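A quick construction sketch, not taken from this commit (the keys, the numbers, and the dual key 100 are invented): a LinearEquality stores the Jacobian terms of A*x = b under an all-constrained noise model, together with the key of its dual (Lagrange multiplier) variable.

#include <gtsam_unstable/linear/LinearEquality.h>

using namespace gtsam;

void linearEqualityExample() {
  // Encode the scalar constraint x1 + 2*x2 = 3, with dual variable key 100.
  Matrix A1 = eye(1);
  Matrix A2 = 2.0 * eye(1);
  LinearEquality constraint(1, A1, 2, A2, 3.0 * ones(1), 100);
  VectorValues x;
  x.insert(1, ones(1));
  x.insert(2, ones(1));
  Vector residual = constraint.error_vector(x);  // unweighted A*x - b = 1 + 2 - 3 = 0
  double cost = constraint.error(x);             // always 0.0 by design (see error() above)
}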

gtsam_unstable/linear/LinearEqualityFactorGraph.h

@@ -0,0 +1,32 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/*
* LinearEqualityFactorGraph.h
* @brief: Factor graph of all LinearEquality factors
* @date: Dec 8, 2014
* @author: thduynguyen
*/
#pragma once
#include <gtsam/inference/FactorGraph.h>
#include <gtsam_unstable/linear/LinearEquality.h>
namespace gtsam {
class LinearEqualityFactorGraph : public FactorGraph<LinearEquality> {
public:
typedef boost::shared_ptr<LinearEqualityFactorGraph> shared_ptr;
};
} // namespace gtsam

gtsam_unstable/linear/LinearInequality.h

@@ -0,0 +1,152 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/*
* LinearInequality.h
* @brief: LinearInequality derived from Base with constrained noise model
* @date: Nov 27, 2014
* @author: thduynguyen
*/
#pragma once
#include <gtsam/linear/JacobianFactor.h>
namespace gtsam {
/**
* This class defines linear constraints by inheriting from Base
* with the special Constrained noise model
*/
class LinearInequality: public JacobianFactor {
public:
typedef LinearInequality This; ///< Typedef to this class
typedef JacobianFactor Base; ///< Typedef to base class
typedef boost::shared_ptr<This> shared_ptr; ///< shared_ptr to this class
private:
Key dualKey_;
public:
/** default constructor for I/O */
LinearInequality() :
Base() {
}
/** Conversion from HessianFactor (does Cholesky to obtain Jacobian matrix) */
explicit LinearInequality(const HessianFactor& hf) {
throw std::runtime_error(
"Cannot convert HessianFactor to LinearInequality");
}
/** Construct unary factor */
LinearInequality(Key i1, const Matrix& A1, const Vector& b, Key dualKey) :
Base(i1, A1, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct binary factor */
LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2,
const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, b, noiseModel::Constrained::All(b.rows())), dualKey_(
dualKey) {
}
/** Construct ternary factor */
LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, b, noiseModel::Constrained::All(b.rows())), dualKey_(
dualKey) {
}
/** Construct four-ary factor */
LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, Key i4, const Matrix& A4, const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, i4, A4, b,
noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct five-ary factor */
LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, b,
noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct six-ary factor */
LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3,
const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5,
Key i6, const Matrix& A6, const Vector& b, Key dualKey) :
Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, i6, A6, b,
noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Construct an n-ary factor
* @tparam TERMS A container whose value type is std::pair<Key, Matrix>, specifying the
* collection of keys and matrices making up the factor. */
template<typename TERMS>
LinearInequality(const TERMS& terms, const Vector& b, Key dualKey) :
Base(terms, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) {
}
/** Virtual destructor */
virtual ~LinearInequality() {
}
/** equals */
virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const {
return Base::equals(lf, tol);
}
/** print */
virtual void print(const std::string& s = "", const KeyFormatter& formatter =
DefaultKeyFormatter) const {
Base::print(s, formatter);
}
/** Clone this LinearInequality */
virtual GaussianFactor::shared_ptr clone() const {
return boost::static_pointer_cast<GaussianFactor>(
boost::make_shared<LinearInequality>(*this));
}
/// dual key
Key dualKey() const { return dualKey_; }
/** Special error_vector for constraints (A*x-b) */
Vector error_vector(const VectorValues& c) const {
return unweighted_error(c);
}
/** Special error for constraints.
* I think it should be zero, as this function is meant for objective cost.
* But the name "error" can be misleading.
* TODO: confirm with Frank!! */
virtual double error(const VectorValues& c) const {
return 0.0;
}
/** dot product of row s with the corresponding vector in p */
double dotProductRow(size_t s, const VectorValues& p) const {
double ajTp = 0.0;
for (const_iterator xj = begin(); xj != end(); ++xj) {
Vector pj = p.at(*xj);
Vector aj = getA(xj).row(s);
ajTp += aj.dot(pj);
}
return ajTp;
}
};
// LinearInequality
}// gtsam
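The new dotProductRow helper is what QPSolver::computeStepSize below uses to evaluate a_j'*p for a single constraint row. A small illustrative sketch, with invented keys and values:

#include <gtsam_unstable/linear/LinearInequality.h>

using namespace gtsam;

void dotProductRowExample() {
  // Inequality x1 - x2 <= 4, dual key 101.
  LinearInequality ineq(1, eye(1), 2, -eye(1), 4.0 * ones(1), 101);
  VectorValues p;
  p.insert(1, ones(1));   // p1 = 1
  p.insert(2, zero(1));   // p2 = 0
  double aTp = ineq.dotProductRow(0, p);  // a0'*p = 1*1 + (-1)*0 = 1
}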

gtsam_unstable/linear/LinearInequalityFactorGraph.h

@@ -0,0 +1,48 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/*
* LinearInequalityFactorGraph.h
* @brief: Factor graph of all LinearInequality factors
* @date: Dec 8, 2014
* @author: thduynguyen
*/
#pragma once
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/inference/FactorGraph-inst.h>
#include <gtsam_unstable/linear/LinearInequality.h>
namespace gtsam {
class LinearInequalityFactorGraph: public FactorGraph<LinearInequality> {
private:
typedef FactorGraph<LinearInequality> Base;
public:
typedef boost::shared_ptr<LinearInequalityFactorGraph> shared_ptr;
/** print */
void print(const std::string& str, const KeyFormatter& keyFormatter =
DefaultKeyFormatter) const {
Base::print(str, keyFormatter);
}
/** equals */
bool equals(const LinearInequalityFactorGraph& other,
double tol = 1e-9) const {
return Base::equals(other, tol);
}
};
} // namespace gtsam

gtsam_unstable/linear/QP.h

@@ -0,0 +1,58 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/*
* QP.h
* @brief: Factor graphs of a Quadratic Programming problem
* @date: Dec 8, 2014
* @author: thduynguyen
*/
#pragma once
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam_unstable/linear/LinearEqualityFactorGraph.h>
#include <gtsam_unstable/linear/LinearInequalityFactorGraph.h>
namespace gtsam {
/**
* struct contains factor graphs of a Quadratic Programming problem
*/
struct QP {
GaussianFactorGraph cost; //!< Quadratic cost factors
LinearEqualityFactorGraph equalities; //!< linear equality constraints
LinearInequalityFactorGraph inequalities; //!< linear inequality constraints
/** default constructor */
QP() :
cost(), equalities(), inequalities() {
}
/** constructor */
QP(const GaussianFactorGraph& _cost,
const LinearEqualityFactorGraph& _linearEqualities,
const LinearInequalityFactorGraph& _linearInequalities) :
cost(_cost), equalities(_linearEqualities), inequalities(
_linearInequalities) {
}
/** print */
void print(const std::string& s = "") {
std::cout << s << std::endl;
cost.print("Quadratic cost factors: ");
equalities.print("Linear equality factors: ");
inequalities.print("Linear inequality factors: ");
}
};
} // namespace gtsam
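To see how the pieces fit together, a hedged end-to-end sketch follows. The toy problem, the keys (1, 2) and the dual keys (100, 101) are invented; the interfaces (QP, LinearEquality, LinearInequality, QPSolver::optimize) are the ones declared in this commit, and the initial values are assumed feasible as the active-set loop requires.

#include <gtsam/linear/JacobianFactor.h>
#include <gtsam_unstable/linear/QP.h>
#include <gtsam_unstable/linear/QPSolver.h>

using namespace gtsam;

int main() {
  // Minimize 0.5*x1^2 - x1 + 0.5*x2^2 subject to x1 + x2 = 1 and x1 <= 0.8.
  QP qp;
  // Quadratic cost as Gaussian factors: 0.5*||x1 - 1||^2 and 0.5*||x2||^2.
  qp.cost.push_back(JacobianFactor(1, eye(1), ones(1)));
  qp.cost.push_back(JacobianFactor(2, eye(1), zero(1)));
  // Constraints, each carrying the key of its dual (Lagrange multiplier) variable.
  qp.equalities.push_back(LinearEquality(1, eye(1), 2, eye(1), ones(1), 100));
  qp.inequalities.push_back(LinearInequality(1, eye(1), 0.8 * ones(1), 101));

  QPSolver solver(qp);
  VectorValues initial;        // a feasible starting point: x1 = 0, x2 = 1
  initial.insert(1, zero(1));
  initial.insert(2, ones(1));
  VectorValues solution, duals;
  boost::tie(solution, duals) = solver.optimize(initial);
  solution.print("QP solution: ");
  return 0;
}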

gtsam_unstable/linear/QPSolver.cpp

@@ -6,255 +6,85 @@
  */
 #include <gtsam/inference/Symbol.h>
-#include <gtsam/inference/FactorGraph-inst.h>
 #include <gtsam_unstable/linear/QPSolver.h>
 #include <gtsam_unstable/linear/LPSolver.h>
-#include <boost/foreach.hpp>
 #include <boost/range/adaptor/map.hpp>
 using namespace std;
+#define ACTIVE 0.0
+#define INACTIVE std::numeric_limits<double>::infinity()
 namespace gtsam {
-/// Convert a Gaussian factor to a jacobian. return empty shared ptr if failed
-static JacobianFactor::shared_ptr toJacobian(
-    const GaussianFactor::shared_ptr& factor) {
-  JacobianFactor::shared_ptr jacobian(
-      boost::dynamic_pointer_cast<JacobianFactor>(factor));
-  return jacobian;
+//******************************************************************************
+QPSolver::QPSolver(const QP& qp) : qp_(qp) {
+  baseGraph_ = qp_.cost;
+  baseGraph_.push_back(qp_.equalities.begin(), qp_.equalities.end());
+  costVariableIndex_ = VariableIndex(qp_.cost);
+  equalityVariableIndex_ = VariableIndex(qp_.equalities);
+  inequalityVariableIndex_ = VariableIndex(qp_.inequalities);
+  constrainedKeys_ = qp_.equalities.keys();
+  constrainedKeys_.merge(qp_.inequalities.keys());
 }
 //******************************************************************************
-QPSolver::QPSolver(const GaussianFactorGraph& graph) :
-    graph_(graph), fullFactorIndices_(graph) {
-  // Split the original graph into unconstrained and constrained part
-  // and collect indices of constrained factors
-  for (size_t i = 0; i < graph.nrFactors(); ++i) {
-    // obtain the factor and its noise model
-    JacobianFactor::shared_ptr jacobian = toJacobian(graph.at(i));
-    if (jacobian && jacobian->get_model()
-        && jacobian->get_model()->isConstrained()) {
-      constraintIndices_.push_back(i);
-    }
-  }
-  // Collect constrained variable keys
-  set<size_t> constrainedVars;
-  BOOST_FOREACH(size_t index, constraintIndices_) {
-    KeyVector keys = graph.at(index)->keys();
-    constrainedVars.insert(keys.begin(), keys.end());
-  }
-  // Collect unconstrained hessians of constrained vars to build dual graph
-  findUnconstrainedHessiansOfConstrainedVars(constrainedVars);
-  freeHessianFactorIndex_ = VariableIndex(freeHessians_);
+VectorValues QPSolver::solveWithCurrentWorkingSet(
+    const LinearInequalityFactorGraph& workingSet) const {
+  GaussianFactorGraph workingGraph = baseGraph_;
+  workingGraph.push_back(workingSet);
+  return workingGraph.optimize();
 }
 //******************************************************************************
-void QPSolver::findUnconstrainedHessiansOfConstrainedVars(
-    const set<Key>& constrainedVars) {
-  VariableIndex variableIndex(graph_);
-  // Collect all factors involving constrained vars
-  FastSet<size_t> factors;
-  BOOST_FOREACH(Key key, constrainedVars) {
-    VariableIndex::Factors factorsOfThisVar = variableIndex[key];
-    BOOST_FOREACH(size_t factorIndex, factorsOfThisVar) {
-      factors.insert(factorIndex);
-    }
-  }
-  // Convert each factor into Hessian
-  BOOST_FOREACH(size_t factorIndex, factors) {
-    GaussianFactor::shared_ptr gf = graph_[factorIndex];
-    if (!gf)
-      continue;
-    // See if this is a Jacobian factor
-    JacobianFactor::shared_ptr jf = //
-        boost::dynamic_pointer_cast<JacobianFactor>(gf);
-    if (jf) {
-      // Dealing with mixed constrained factor
-      if (jf->get_model() && jf->isConstrained()) {
-        // Turn a mixed-constrained factor into a factor with 0 information on the constrained part
-        Vector sigmas = jf->get_model()->sigmas();
-        Vector newPrecisions(sigmas.size());
-        bool mixed = false;
-        for (size_t s = 0; s < sigmas.size(); ++s) {
-          if (sigmas[s] <= 1e-9)
-            newPrecisions[s] = 0.0; // 0 info for constraints (both inequality and eq)
-          else {
-            newPrecisions[s] = 1.0 / sigmas[s];
-            mixed = true;
-          }
-        }
-        if (mixed) { // only add free hessians if it's mixed
-          JacobianFactor::shared_ptr newJacobian = toJacobian(jf->clone());
-          newJacobian->setModel(
-              noiseModel::Diagonal::Precisions(newPrecisions));
-          freeHessians_.push_back(HessianFactor(*newJacobian));
-        }
-      } else { // unconstrained Jacobian
-        // Convert the original linear factor to Hessian factor
-        // TODO: This may fail and throw the following exception
-        // Assertion failed: (((!PanelMode) && stride==0 && offset==0) ||
-        // (PanelMode && stride>=depth && offset<=stride)), function operator(),
-        // file Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h, line 1133.
-        // because of a weird error which might be related to clang
-        // See this: https://groups.google.com/forum/#!topic/ceres-solver/DYhqOLPquHU
-        // My current way to fix this is to compile both gtsam and my library in Release mode
-        freeHessians_.add(HessianFactor(*jf));
-      }
-    } else { // If it's not a Jacobian, it should be a hessian factor. Just add!
-      HessianFactor::shared_ptr hf = //
-          boost::dynamic_pointer_cast<HessianFactor>(gf);
-      if (hf)
-        freeHessians_.push_back(hf);
-    }
+JacobianFactor::shared_ptr QPSolver::createDualFactor(Key key,
+    const LinearInequalityFactorGraph& workingSet, const VectorValues& delta) const {
+  // Transpose the A matrix of constrained factors to have the jacobian of the dual key
+  std::vector<std::pair<Key, Matrix> > Aterms = collectDualJacobians
+      < LinearEquality > (key, qp_.equalities, equalityVariableIndex_);
+  std::vector<std::pair<Key, Matrix> > AtermsInequalities = collectDualJacobians
+      < LinearInequality > (key, workingSet, inequalityVariableIndex_);
+  Aterms.insert(Aterms.end(), AtermsInequalities.begin(),
+      AtermsInequalities.end());
+  // Collect the gradients of unconstrained cost factors to the b vector
+  Vector b = zero(delta.at(key).size());
+  BOOST_FOREACH(size_t factorIx, costVariableIndex_[key]) {
+    GaussianFactor::shared_ptr factor = qp_.cost.at(factorIx);
+    b += factor->gradient(key, delta);
   }
+  return boost::make_shared<JacobianFactor>(Aterms, b);
 }
 //******************************************************************************
-GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
-    const VectorValues& x0, bool useLeastSquare) const {
-  static const bool debug = false;
-  // The dual graph to return
-  GaussianFactorGraph dualGraph;
-  // For each variable xi involving in some constraint, compute the unconstrained gradient
-  // wrt xi from the prebuilt freeHessian graph
-  // \grad f(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi
-  if (debug)
-    freeHessianFactorIndex_.print("freeHessianFactorIndex_: ");
-  BOOST_FOREACH(const VariableIndex::value_type& xiKey_factors, freeHessianFactorIndex_) {
-    Key xiKey = xiKey_factors.first;
-    VariableIndex::Factors xiFactors = xiKey_factors.second;
-    // Find xi's dim from the first factor on xi
-    if (xiFactors.size() == 0)
-      continue;
-    GaussianFactor::shared_ptr xiFactor0 = freeHessians_.at(*xiFactors.begin());
-    size_t xiDim = xiFactor0->getDim(xiFactor0->find(xiKey));
-    if (debug)
-      xiFactor0->print("xiFactor0: ");
-    if (debug)
-      cout << "xiKey: " << string(Symbol(xiKey)) << ", xiDim: " << xiDim
-          << endl;
-    //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-    // Compute the b-vector for the dual factor Ax-b
-    // b = gradf(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi
-    Vector gradf_xi = zero(xiDim);
-    BOOST_FOREACH(size_t factorIx, xiFactors) {
-      HessianFactor::shared_ptr factor = freeHessians_.at(factorIx);
-      Factor::const_iterator xi = factor->find(xiKey);
-      // Sum over Gij*xj for all xj connecting to xi
-      for (Factor::const_iterator xj = factor->begin(); xj != factor->end();
-          ++xj) {
-        // Obtain Gij from the Hessian factor
-        // Hessian factor only stores an upper triangular matrix, so be careful when i>j
-        Matrix Gij;
-        if (xi > xj) {
-          Matrix Gji = factor->info(xj, xi);
-          Gij = Gji.transpose();
-        } else {
-          Gij = factor->info(xi, xj);
-        }
-        // Accumulate Gij*xj to gradf
-        Vector x0_j = x0.at(*xj);
-        gradf_xi += Gij * x0_j;
-      }
-      // Subtract the linear term gi
-      gradf_xi += -factor->linearTerm(xi);
-    }
-    //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-    // Compute the Jacobian A for the dual factor Ax-b
-    // Obtain the jacobians for lambda variables from their corresponding constraints
-    // A = gradc_k(xi) = \frac{\partial c_k}{\partial xi}'
-    vector<pair<Key, Matrix> > lambdaTerms; // collection of lambda_k, and gradc_k
-    typedef pair<size_t, size_t> FactorIx_SigmaIx;
-    vector<FactorIx_SigmaIx> unconstrainedIndex; // pairs of factorIx,sigmaIx of unconstrained rows
-    BOOST_FOREACH(size_t factorIndex, fullFactorIndices_[xiKey]) {
-      JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIndex));
-      if (!factor || !factor->isConstrained())
-        continue;
-      // Gradient is the transpose of the Jacobian: A_k = gradc_k(xi) = \frac{\partial c_k}{\partial xi}'
-      // Each column for each lambda_k corresponds to [the transpose of] each constrained row factor
-      Matrix A_k = factor->getA(factor->find(xiKey)).transpose();
-      if (debug)
-        gtsam::print(A_k, "A_k = ");
-      // Deal with mixed sigmas: no information if sigma != 0
-      Vector sigmas = factor->get_model()->sigmas();
-      for (size_t sigmaIx = 0; sigmaIx < sigmas.size(); ++sigmaIx) {
-        // if it's either inequality (sigma<0) or unconstrained (sigma>0)
-        // we have no information about it
-        if (fabs(sigmas[sigmaIx]) > 1e-9) {
-          A_k.col(sigmaIx) = zero(A_k.rows());
-          // remember to add a zero prior on this lambda, otherwise the graph is under-determined
-          unconstrainedIndex.push_back(make_pair(factorIndex, sigmaIx));
-        }
-      }
-      // Use factorIndex as the lambda's key.
-      lambdaTerms.push_back(make_pair(factorIndex, A_k));
-    }
-    //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-    // Create and add factors to the dual graph
-    // If least square approximation is desired, use unit noise model.
-    if (debug)
-      cout << "Create dual factor" << endl;
-    if (useLeastSquare) {
-      if (debug)
-        cout << "use least square!" << endl;
-      dualGraph.push_back(
-          JacobianFactor(lambdaTerms, gradf_xi,
-              noiseModel::Unit::Create(gradf_xi.size())));
-    } else {
-      // Enforce constrained noise model so lambdas are solved with QR
-      // and should exactly satisfy all the equations
-      if (debug)
-        cout << gradf_xi << endl;
-      dualGraph.push_back(
-          JacobianFactor(lambdaTerms, gradf_xi,
-              noiseModel::Constrained::All(gradf_xi.size())));
-    }
-    // Add 0 priors on all lambdas of the unconstrained rows to make sure the graph is solvable
-    if (debug)
-      cout << "Create priors" << endl;
-    BOOST_FOREACH(FactorIx_SigmaIx factorIx_sigmaIx, unconstrainedIndex) {
-      size_t factorIx = factorIx_sigmaIx.first;
-      JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIx));
-      size_t dim = factor->get_model()->dim();
-      Matrix J = zeros(dim, dim);
-      size_t sigmaIx = factorIx_sigmaIx.second;
-      J(sigmaIx, sigmaIx) = 1.0;
-      // Use factorIndex as the lambda's key.
-      if (debug)
-        cout << "prior for factor " << factorIx << endl;
-      dualGraph.push_back(JacobianFactor(factorIx, J, zero(dim)));
-    }
-  }
+GaussianFactorGraph::shared_ptr QPSolver::buildDualGraph(
+    const LinearInequalityFactorGraph& workingSet, const VectorValues& delta) const {
+  GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph());
+  BOOST_FOREACH(Key key, constrainedKeys_) {
+    // Each constrained key becomes a factor in the dual graph
+    dualGraph->push_back(createDualFactor(key, workingSet, delta));
+  }
   return dualGraph;
 }
 //******************************************************************************
 pair<int, int> QPSolver::identifyLeavingConstraint(
+    const LinearInequalityFactorGraph& workingSet,
     const VectorValues& lambdas) const {
   int worstFactorIx = -1, worstSigmaIx = -1;
   // preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either
   // inactive or a good inequality constraint, so we don't care!
   double maxLambda = 0.0;
-  BOOST_FOREACH(size_t factorIx, constraintIndices_) {
-    Vector lambda = lambdas.at(factorIx);
-    Vector orgSigmas = toJacobian(graph_.at(factorIx))->get_model()->sigmas();
-    for (size_t j = 0; j < orgSigmas.size(); ++j)
-      // If it is a BAD active inequality, and lambda is larger than the current max
-      if (orgSigmas[j] < 0 && lambda[j] > maxLambda) {
+  for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) {
+    const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
+    Vector lambda = lambdas.at(factor->dualKey());
+    Vector sigmas = factor->get_model()->sigmas();
+    for (size_t j = 0; j < sigmas.size(); ++j)
+      // If it is an active constraint, and lambda is larger than the current max
+      if (sigmas[j] == ACTIVE && lambda[j] > maxLambda) {
         worstFactorIx = factorIx;
         worstSigmaIx = j;
         maxLambda = lambda[j];
@@ -264,14 +94,16 @@ pair<int, int> QPSolver::identifyLeavingConstraint(
   }
 //******************************************************************************
-bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph,
-    int factorIx, int sigmaIx, double newSigma) const {
+LinearInequalityFactorGraph QPSolver::updateWorkingSet(
+    const LinearInequalityFactorGraph& workingSet, int factorIx, int sigmaIx,
+    double state) const {
+  LinearInequalityFactorGraph newWorkingSet = workingSet;
   if (factorIx < 0 || sigmaIx < 0)
-    return false;
-  Vector sigmas = toJacobian(workingGraph.at(factorIx))->get_model()->sigmas();
-  sigmas[sigmaIx] = newSigma; // removing it from the working set
-  toJacobian(workingGraph.at(factorIx))->setModel(true, sigmas);
-  return true;
+    return newWorkingSet;
+  Vector sigmas = newWorkingSet.at(factorIx)->get_model()->sigmas();
+  sigmas[sigmaIx] = state;
+  newWorkingSet.at(factorIx)->setModel(true, sigmas);
+  return newWorkingSet;
 }
 //******************************************************************************
@@ -291,44 +123,28 @@ bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph,
  * We want the minimum of all those alphas among all inactive inequality.
  */
 boost::tuple<double, int, int> QPSolver::computeStepSize(
-    const GaussianFactorGraph& workingGraph, const VectorValues& xk,
+    const LinearInequalityFactorGraph& workingSet, const VectorValues& xk,
     const VectorValues& p) const {
   static bool debug = false;
   double minAlpha = 1.0;
   int closestFactorIx = -1, closestSigmaIx = -1;
-  BOOST_FOREACH(size_t factorIx, constraintIndices_) {
-    JacobianFactor::shared_ptr jacobian = toJacobian(workingGraph.at(factorIx));
-    Vector sigmas = jacobian->get_model()->sigmas();
-    Vector b = jacobian->getb();
+  for(size_t factorIx = 0; factorIx<workingSet.size(); ++factorIx) {
+    const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
+    Vector sigmas = factor->get_model()->sigmas();
+    Vector b = factor->getb();
     for (size_t s = 0; s < sigmas.size(); ++s) {
       // If it is an inactive inequality, compute alpha and update min
-      if (sigmas[s] < 0) {
+      if (sigmas[s] == INACTIVE) {
         // Compute aj'*p
-        double ajTp = 0.0;
-        for (Factor::const_iterator xj = jacobian->begin();
-            xj != jacobian->end(); ++xj) {
-          Vector pj = p.at(*xj);
-          Vector aj = jacobian->getA(xj).row(s);
-          ajTp += aj.dot(pj);
-        }
-        if (debug)
-          cout << "s, ajTp, b[s]: " << s << " " << ajTp << " " << b[s] << endl;
+        double ajTp = factor->dotProductRow(s, p);
         // Check if aj'*p >0. Don't care if it's not.
         if (ajTp <= 0)
           continue;
         // Compute aj'*xk
-        double ajTx = 0.0;
-        for (Factor::const_iterator xj = jacobian->begin();
-            xj != jacobian->end(); ++xj) {
-          Vector xkj = xk.at(*xj);
-          Vector aj = jacobian->getA(xj).row(s);
-          ajTx += aj.dot(xkj);
-        }
-        if (debug)
-          cout << "b[s], ajTx: " << b[s] << " " << ajTx << " " << ajTp << endl;
+        double ajTx = factor->dotProductRow(s, xk);
         // alpha = (bj - aj'*xk) / (aj'*p)
         double alpha = (b[s] - ajTx) / ajTp;
@@ -348,171 +164,224 @@ boost::tuple<double, int, int> QPSolver::computeStepSize(
   }
 //******************************************************************************
-bool QPSolver::iterateInPlace(GaussianFactorGraph& workingGraph,
-    VectorValues& currentSolution, VectorValues& lambdas) const {
+QPState QPSolver::iterate(const QPState& state) const {
   static bool debug = false;
+  // Solve with the current working set
+  VectorValues newValues = solveWithCurrentWorkingSet(state.workingSet);
   if (debug)
-    workingGraph.print("workingGraph: ");
-  // Obtain the solution from the current working graph
-  VectorValues newSolution = workingGraph.optimize();
-  if (debug)
-    newSolution.print("New solution:");
+    newValues.print("New solution:");
   // If we CAN'T move further
-  if (newSolution.equals(currentSolution, 1e-5)) {
+  if (newValues.equals(state.values, 1e-5)) {
     // Compute lambda from the dual graph
     if (debug)
       cout << "Building dual graph..." << endl;
-    GaussianFactorGraph dualGraph = buildDualGraph(workingGraph, newSolution);
+    GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet, newValues);
     if (debug)
-      dualGraph.print("Dual graph: ");
-    lambdas = dualGraph.optimize();
+      dualGraph->print("Dual graph: ");
+    VectorValues duals = dualGraph->optimize();
     if (debug)
-      lambdas.print("lambdas :");
-    int factorIx, sigmaIx;
-    boost::tie(factorIx, sigmaIx) = identifyLeavingConstraint(lambdas);
+      duals.print("Duals :");
+    int leavingFactor, leavingSigmaIx;
+    boost::tie(leavingFactor, leavingSigmaIx) = //
+        identifyLeavingConstraint(state.workingSet, duals);
     if (debug)
-      cout << "violated active inequality - factorIx, sigmaIx: " << factorIx
-          << " " << sigmaIx << endl;
-    // Try to de-activate the weakest violated inequality constraints
-    // if not successful, i.e. all inequality constraints are satisfied:
-    // We have the solution!!
-    if (!updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, -1.0))
-      return true;
-  } else {
+      cout << "violated active inequality - factorIx, sigmaIx: " << leavingFactor
+          << " " << leavingSigmaIx << endl;
+    // If all inequality constraints are satisfied: We have the solution!!
+    if (leavingFactor < 0 || leavingSigmaIx < 0) {
+      return QPState(newValues, duals, state.workingSet, true);
+    }
+    else {
+      // Inactivate the leaving constraint
+      LinearInequalityFactorGraph newWorkingSet = updateWorkingSet(
+          state.workingSet, leavingFactor, leavingSigmaIx, INACTIVE);
+      return QPState(newValues, duals, newWorkingSet, false);
+    }
+  }
+  else {
     // If we CAN make some progress
     // Adapt stepsize if some inactive constraints complain about this move
-    if (debug)
-      cout << "Computing stepsize..." << endl;
     double alpha;
     int factorIx, sigmaIx;
-    VectorValues p = newSolution - currentSolution;
+    VectorValues p = newValues - state.values;
     boost::tie(alpha, factorIx, sigmaIx) = //
-        computeStepSize(workingGraph, currentSolution, p);
+        computeStepSize(state.workingSet, state.values, p);
     if (debug)
       cout << "alpha, factorIx, sigmaIx: " << alpha << " " << factorIx << " "
           << sigmaIx << endl;
     // also add to the working set the one that complains the most
-    updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, 0.0);
-    // step!
-    currentSolution = currentSolution + alpha * p;
-    // if (alpha <1e-5) {
-    //   if (debug) cout << "Building dual graph..." << endl;
-    //   GaussianFactorGraph dualGraph = buildDualGraph(workingGraph, newSolution);
-    //   if (debug) dualGraph.print("Dual graph: ");
-    //   lambdas = dualGraph.optimize();
-    //   if (debug) lambdas.print("lambdas :");
-    //   return true; // TODO: HACK HACK!!!
-    // }
-  }
-  return false;
+    LinearInequalityFactorGraph newWorkingSet = //
+        updateWorkingSet(state.workingSet, factorIx, sigmaIx, ACTIVE);
+    // step!
+    newValues = state.values + alpha * p;
+    return QPState(newValues, state.duals, newWorkingSet, false);
+  }
 }
 //******************************************************************************
 pair<VectorValues, VectorValues> QPSolver::optimize(
     const VectorValues& initialValues) const {
-  GaussianFactorGraph workingGraph = graph_.clone();
-  VectorValues currentSolution = initialValues;
-  VectorValues lambdas;
-  bool converged = false;
-  while (!converged) {
-    converged = iterateInPlace(workingGraph, currentSolution, lambdas);
+  // TODO: initialize workingSet from the feasible initialValues
+  LinearInequalityFactorGraph workingSet(qp_.inequalities);
+  QPState state(initialValues, VectorValues(), workingSet, false);
+  /// main loop of the solver
+  while (!state.converged) {
+    state = iterate(state);
   }
-  return make_pair(currentSolution, lambdas);
+  return make_pair(state.values, state.duals);
 }
 //******************************************************************************
-pair<VectorValues, Key> QPSolver::initialValuesLP() const {
-  size_t firstSlackKey = 0;
-  BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) {
-    if (firstSlackKey < key)
-      firstSlackKey = key;
+std::pair<bool, Key> QPSolver::maxKey(const FastSet<Key>& keys) const {
+  KeySet::iterator maxEl = std::max_element(keys.begin(), keys.end());
+  if (maxEl==keys.end())
+    return make_pair(false, 0);
+  return make_pair(true, *maxEl);
   }
+//******************************************************************************
+boost::tuple<VectorValues, Key, Key> QPSolver::initialValuesLP() const {
+  // Key for the first slack variable = maximum key + 1
+  size_t firstSlackKey;
+  bool found;
+  KeySet allKeys = qp_.cost.keys();
+  allKeys.merge(qp_.equalities.keys());
+  allKeys.merge(qp_.inequalities.keys());
+  boost::tie(found, firstSlackKey) = maxKey(allKeys);
   firstSlackKey += 1;
   VectorValues initialValues;
   // Create zero values for constrained vars
-  BOOST_FOREACH(size_t iFactor, constraintIndices_) {
-    JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor));
-    KeyVector keys = jacobian->keys();
+  BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) {
+    KeyVector keys = factor->keys();
     BOOST_FOREACH(Key key, keys) {
       if (!initialValues.exists(key)) {
-        size_t dim = jacobian->getDim(jacobian->find(key));
+        size_t dim = factor->getDim(factor->find(key));
+        initialValues.insert(key, zero(dim));
+      }
+    }
+  }
+  BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) {
+    KeyVector keys = factor->keys();
+    BOOST_FOREACH(Key key, keys) {
+      if (!initialValues.exists(key)) {
+        size_t dim = factor->getDim(factor->find(key));
         initialValues.insert(key, zero(dim));
       }
     }
   }
   // Insert initial values for slack variables
-  size_t slackKey = firstSlackKey;
-  BOOST_FOREACH(size_t iFactor, constraintIndices_) {
-    JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor));
-    Vector errorAtZero = jacobian->getb();
-    Vector slackInit = zero(errorAtZero.size());
-    Vector sigmas = jacobian->get_model()->sigmas();
-    for (size_t i = 0; i < sigmas.size(); ++i) {
-      if (sigmas[i] < 0) {
-        slackInit[i] = std::max(errorAtZero[i], 0.0);
-      } else if (sigmas[i] == 0.0) {
-        errorAtZero[i] = fabs(errorAtZero[i]);
-      } // if it has >0 sigma, i.e. normal Gaussian noise, initialize it at 0
-    }
+  Key slackKey = firstSlackKey;
+  // Equality: zi = |bi|
+  BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) {
+    Vector errorAtZero = factor->getb();
+    Vector slackInit = errorAtZero.cwiseAbs();
     initialValues.insert(slackKey, slackInit);
     slackKey++;
   }
-  return make_pair(initialValues, firstSlackKey);
+  // Inequality: zi = max(bi, 0)
+  BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) {
+    Vector errorAtZero = factor->getb();
+    Vector zeroVec = zero(errorAtZero.size());
+    Vector slackInit = errorAtZero.cwiseMax(zeroVec);
+    initialValues.insert(slackKey, slackInit);
+    slackKey++;
+  }
+  return boost::make_tuple(initialValues, firstSlackKey, slackKey - 1);
 }
 //******************************************************************************
 VectorValues QPSolver::objectiveCoeffsLP(Key firstSlackKey) const {
   VectorValues slackObjective;
-  for (size_t i = 0; i < constraintIndices_.size(); ++i) {
-    Key key = firstSlackKey + i;
-    size_t iFactor = constraintIndices_[i];
-    JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor));
-    size_t dim = jacobian->rows();
-    Vector objective = ones(dim);
-    /* We should not ignore unconstrained slack var dimensions (those rows with sigmas >0)
-     * because their values might be underdetermined in the LP. Since they will have only
-     * 1 constraint zi>=0, enforcing them in the min obj function won't harm the other constrained
-     * slack vars, but also makes them well defined: 0 at the minimum.
-     */
-    slackObjective.insert(key, ones(dim));
+  Key slackKey = firstSlackKey;
+  // Equalities
+  BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) {
+    size_t dim = factor->rows();
+    slackObjective.insert(slackKey, ones(dim));
+    slackKey++;
   }
+  // Inequalities
+  BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) {
+    size_t dim = factor->rows();
+    slackObjective.insert(slackKey, ones(dim));
+    slackKey++;
+  }
   return slackObjective;
 }
 //******************************************************************************
-pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP(
+boost::tuple<LinearEqualityFactorGraph::shared_ptr,
+    LinearInequalityFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP(
     Key firstSlackKey) const {
-  // Create constraints and 0 lower bounds (zi>=0)
-  GaussianFactorGraph::shared_ptr constraints(new GaussianFactorGraph());
+  // Create constraints and zero lower bounds (zi>=0)
+  LinearEqualityFactorGraph::shared_ptr equalities(new LinearEqualityFactorGraph());
+  LinearInequalityFactorGraph::shared_ptr inequalities(new LinearInequalityFactorGraph());
   VectorValues slackLowerBounds;
-  for (size_t key = firstSlackKey;
-      key < firstSlackKey + constraintIndices_.size(); ++key) {
-    size_t iFactor = constraintIndices_[key - firstSlackKey];
-    JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor));
+  Key slackKey = firstSlackKey;
+  // Equalities
+  BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) {
     // Collect old terms to form a new factor
     // TODO: it might be faster if we can get the whole block matrix at once
     // but I don't know how to extend the current VerticalBlockMatrix
     vector<pair<Key, Matrix> > terms;
-    for (Factor::iterator it = jacobian->begin(); it != jacobian->end(); ++it) {
-      terms.push_back(make_pair(*it, jacobian->getA(it)));
+    for (Factor::iterator it = factor->begin(); it != factor->end(); ++it) {
+      terms.push_back(make_pair(*it, factor->getA(it)));
     }
+    Vector b = factor->getb();
+    Vector sign_b = b.cwiseQuotient(b.cwiseAbs());
+    terms.push_back(make_pair(slackKey, sign_b));
+    equalities->push_back(LinearEquality(terms, b, factor->dualKey()));
+    // Add lower bound for this slack key
+    slackLowerBounds.insert(slackKey, zero(b.rows()));
+    // Increase slackKey for the next slack variable
+    slackKey++;
+  }
+  // Inequalities
+  BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) {
+    // Collect old terms to form a new factor
+    // TODO: it might be faster if we can get the whole block matrix at once
+    // but I don't know how to extend the current VerticalBlockMatrix
+    vector<pair<Key, Matrix> > terms;
+    for (Factor::iterator it = factor->begin(); it != factor->end(); ++it) {
+      terms.push_back(make_pair(*it, factor->getA(it)));
+    }
     // Add the slack term to the constraint
     // Unlike Nocedal06book, pg.473, we want ax-z <= b, since we always assume
-    // LE constraints ax <= b for sigma < 0.
-    size_t dim = jacobian->rows();
-    terms.push_back(make_pair(key, -eye(dim)));
-    constraints->push_back(
-        JacobianFactor(terms, jacobian->getb(), jacobian->get_model()));
+    // LE constraints ax <= b.
+    size_t dim = factor->rows();
+    terms.push_back(make_pair(slackKey, -eye(dim)));
+    inequalities->push_back(LinearInequality(terms, factor->getb(),
+        factor->dualKey()));
     // Add lower bound for this slack key
-    slackLowerBounds.insert(key, zero(dim));
+    slackLowerBounds.insert(slackKey, zero(dim));
+    // Increase slackKey for the next slack variable
+    slackKey++;
   }
-  return make_pair(constraints, slackLowerBounds);
+  return boost::make_tuple(equalities, inequalities, slackLowerBounds);
 }
 //******************************************************************************
@@ -520,19 +389,20 @@ pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
   static const bool debug = false;
   // Initial values with slack variables for the LP subproblem, Nocedal06book, pg.473
   VectorValues initialValues;
-  size_t firstSlackKey;
-  boost::tie(initialValues, firstSlackKey) = initialValuesLP();
+  size_t firstSlackKey, lastSlackKey;
+  boost::tie(initialValues, firstSlackKey, lastSlackKey) = initialValuesLP();
   // Coefficients for the LP subproblem objective function, min \sum_i z_i
   VectorValues objectiveLP = objectiveCoeffsLP(firstSlackKey);
   // Create constraints and lower bounds of slack variables
-  GaussianFactorGraph::shared_ptr constraints;
+  LinearEqualityFactorGraph::shared_ptr equalities;
+  LinearInequalityFactorGraph::shared_ptr inequalities;
   VectorValues slackLowerBounds;
-  boost::tie(constraints, slackLowerBounds) = constraintsLP(firstSlackKey);
+  boost::tie(equalities, inequalities, slackLowerBounds) = constraintsLP(firstSlackKey);
   // Solve the LP subproblem
-  LPSolver lpSolver(objectiveLP, constraints, slackLowerBounds);
+  LPSolver lpSolver(objectiveLP, equalities, inequalities, slackLowerBounds);
   VectorValues solution = lpSolver.solve();
   if (debug)
@@ -540,29 +410,34 @@ pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
   if (debug)
     objectiveLP.print("Objective LP: ");
   if (debug)
-    constraints->print("Constraints LP: ");
+    equalities->print("Equalities LP: ");
+  if (debug)
+    inequalities->print("Inequalities LP: ");
   if (debug)
     solution.print("LP solution: ");
+  // feasible when all slack values are 0s.
+  double slackSumAbs = 0.0;
+  for (Key key = firstSlackKey; key <= lastSlackKey; ++key) {
+    slackSumAbs += solution.at(key).cwiseAbs().sum();
+  }
   // Remove slack variables from solution
-  double slackSum = 0.0;
-  for (Key key = firstSlackKey; key < firstSlackKey + constraintIndices_.size();
-      ++key) {
-    slackSum += solution.at(key).cwiseAbs().sum();
+  for (Key key = firstSlackKey; key <= lastSlackKey; ++key) {
     solution.erase(key);
   }
   // Insert zero vectors for free variables that are not in the constraints
-  BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) {
+  BOOST_FOREACH(Key key, costVariableIndex_ | boost::adaptors::map_keys) {
     if (!solution.exists(key)) {
-      GaussianFactor::shared_ptr factor = graph_.at(
-          *fullFactorIndices_[key].begin());
+      GaussianFactor::shared_ptr factor = qp_.cost.at(
+          *costVariableIndex_[key].begin());
       size_t dim = factor->getDim(factor->find(key));
       solution.insert(key, zero(dim));
     }
   }
-  return make_pair(slackSum < 1e-5, solution);
+  return make_pair(slackSumAbs < 1e-5, solution);
 }
 //******************************************************************************
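For reference, the feasibility subproblem assembled by initialValuesLP, objectiveCoeffsLP and constraintsLP above is, roughly, the standard phase-1 LP with slack variables z >= 0 from Nocedal & Wright 2006, p. 473:

  minimize    sum_i 1' * z_i
  subject to  A_eq * x + sign(b_eq) .* z_eq  =  b_eq    (one slack block per equality factor)
              A_in * x - z_in               <=  b_in    (one slack block per inequality factor)
              z >= 0

The original QP is feasible exactly when the optimal slacks are all (numerically) zero, which findFeasibleInitialValues checks via slackSumAbs < 1e-5 before erasing the slack variables from the returned solution.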

gtsam_unstable/linear/QPSolver.h

@@ -7,14 +7,34 @@
 #pragma once
-#include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/linear/VectorValues.h>
+#include <gtsam_unstable/linear/QP.h>
 #include <vector>
 #include <set>
 namespace gtsam {
+/// This struct holds the state of QPSolver at each iteration
+struct QPState {
+  VectorValues values;
+  VectorValues duals;
+  LinearInequalityFactorGraph workingSet;
+  bool converged;
+  /// default constructor
+  QPState() :
+      values(), duals(), workingSet(), converged(false) {
+  }
+  /// constructor with initial values
+  QPState(const VectorValues& initialValues, const VectorValues& initialDuals,
+      const LinearInequalityFactorGraph& initialWorkingSet, bool _converged) :
+      values(initialValues), duals(initialDuals), workingSet(initialWorkingSet), converged(
+          _converged) {
+  }
+};
 /**
  * This class implements the active set method to solve quadratic programming problems
  * encoded in a GaussianFactorGraph with special mixed constrained noise models, in which
@ -24,32 +44,41 @@ namespace gtsam {
*/ */
class QPSolver { class QPSolver {
class Hessians: public FactorGraph<HessianFactor> { const QP& qp_; //!< factor graphs of the QP problem, can't be modified!
}; GaussianFactorGraph baseGraph_; //!< factor graphs of cost factors and linear equalities. The working set of inequalities will be added to this base graph in the process.
VariableIndex costVariableIndex_, equalityVariableIndex_,
const GaussianFactorGraph& graph_; //!< the original graph, can't be modified! inequalityVariableIndex_;
FastVector<size_t> constraintIndices_; //!< Indices of constrained factors in the original graph FastSet<Key> constrainedKeys_; //!< all constrained keys, will become factors in the dual graph
Hessians freeHessians_; //!< unconstrained Hessians of constrained variables
VariableIndex freeHessianFactorIndex_; //!< indices of unconstrained Hessian factors of constrained variables
// gtsam calls it "VariableIndex", but I think FactorIndex
// makes more sense, because it really stores factor indices.
VariableIndex fullFactorIndices_; //!< indices of factors involving each variable.
// gtsam calls it "VariableIndex", but I think FactorIndex
// makes more sense, because it really stores factor indices.
public: public:
/// Constructor /// Constructor
QPSolver(const GaussianFactorGraph& graph); QPSolver(const QP& qp);
/// Return indices of all constrained factors /// Find solution with the current working set
FastVector<size_t> constraintIndices() const { VectorValues solveWithCurrentWorkingSet(
return constraintIndices_; const LinearInequalityFactorGraph& workingSet) const;
/// @name Build the dual graph
/// @{
/// Collect the Jacobian terms for a dual factor
template<typename FACTOR>
std::vector<std::pair<Key, Matrix> > collectDualJacobians(Key key,
const FactorGraph<FACTOR>& graph,
const VariableIndex& variableIndex) const {
std::vector<std::pair<Key, Matrix> > Aterms;
BOOST_FOREACH(size_t factorIx, variableIndex[key]){
typename FACTOR::shared_ptr factor = graph.at(factorIx);
Matrix Ai = factor->getA(factor->find(key)).transpose();
Aterms.push_back(std::make_pair(factor->dualKey(), Ai));
}
return Aterms;
} }
/// Return the Hessian factor graph of constrained variables /// Create a dual factor
const Hessians& freeHessiansOfConstrainedVars() const { JacobianFactor::shared_ptr createDualFactor(Key key,
return freeHessians_; const LinearInequalityFactorGraph& workingSet,
} const VectorValues& delta) const;
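A rough sketch of how createDualFactor could be assembled from collectDualJacobians and the variable indices declared above (an assumption for illustration, not necessarily this commit's implementation; gradientOfCostAt is a hypothetical helper, and guards for keys absent from a graph are omitted):

  JacobianFactor::shared_ptr QPSolver::createDualFactor(Key key,
      const LinearInequalityFactorGraph& workingSet, const VectorValues& delta) const {
    // Transposed Jacobians A' paired with their dual keys, from equalities and the working set
    std::vector<std::pair<Key, Matrix> > Aterms = collectDualJacobians<LinearEquality>(
        key, qp_.equalities, equalityVariableIndex_);
    std::vector<std::pair<Key, Matrix> > ineqTerms = collectDualJacobians<LinearInequality>(
        key, workingSet, VariableIndex(workingSet));
    Aterms.insert(Aterms.end(), ineqTerms.begin(), ineqTerms.end());
    // b = grad f(x_i) at the current solution delta (hypothetical helper)
    Vector b = gradientOfCostAt(key, delta);
    // The resulting factor encodes A'*lambda = grad f(x_i) for this variable
    return boost::make_shared<JacobianFactor>(Aterms, b);
  }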
/** /**
* Build the dual graph to solve for the Lagrange multipliers. * Build the dual graph to solve for the Lagrange multipliers.
@ -78,8 +107,11 @@ public:
* - The constant term b is \grad f(xi), which can be computed from all unconstrained * - The constant term b is \grad f(xi), which can be computed from all unconstrained
* Hessian factors connecting to xi: \grad f(xi) = \sum_j G_ij*xj - gi * Hessian factors connecting to xi: \grad f(xi) = \sum_j G_ij*xj - gi
*/ */
GaussianFactorGraph buildDualGraph(const GaussianFactorGraph& graph, GaussianFactorGraph::shared_ptr buildDualGraph(
const VectorValues& x0, bool useLeastSquare = false) const; const LinearInequalityFactorGraph& workingSet,
const VectorValues& delta) const;
/// @}
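In equation form, each dual factor enforces the KKT stationarity condition at the current iterate. For a constrained variable x_i, with E the equalities and W the working set of inequalities (the sign convention on lambda is inferred from the dual unit test below, which expects lambda = 2):

  \nabla_{x_i} f(x) - \sum_{k \in \mathcal{E} \cup \mathcal{W}} A_{ki}^{\top} \lambda_k = 0,
  \qquad
  \nabla_{x_i} f(x) = \sum_j G_{ij} x_j - g_i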
/** /**
* The goal of this function is to find currently active inequality constraints * The goal of this function is to find currently active inequality constraints
@ -116,15 +148,15 @@ public:
* *
*/ */
std::pair<int, int> identifyLeavingConstraint( std::pair<int, int> identifyLeavingConstraint(
const LinearInequalityFactorGraph& workingSet,
const VectorValues& lambdas) const; const VectorValues& lambdas) const;
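Judging from the constraintsAux unit test below, the convention is to return the (factor, row) index of the largest positive multiplier among the active inequalities, and (-1, -1) when every multiplier is non-positive, i.e. when no constraint should leave the working set:

  (\text{factorIx}, \text{lambdaIx}) = \arg\max_{k,\,j} \lambda_{k,j},
  \qquad \text{leave only if } \max_{k,j} \lambda_{k,j} > 0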
/** /**
* Deactivate or activate an inequality constraint in place * Deactivate or activate an inequality constraint
* Warning: modify in-place to avoid copy/clone
* @return true if update successful
*/ */
bool updateWorkingSetInplace(GaussianFactorGraph& workingGraph, int factorIx, LinearInequalityFactorGraph updateWorkingSet(
int sigmaIx, double newSigma) const; const LinearInequalityFactorGraph& workingSet, int factorIx, int sigmaIx,
double state) const;
/** /**
* Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1] * Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1]
@ -135,12 +167,11 @@ public:
* in the next iteration * in the next iteration
*/ */
boost::tuple<double, int, int> computeStepSize( boost::tuple<double, int, int> computeStepSize(
const GaussianFactorGraph& workingGraph, const VectorValues& xk, const LinearInequalityFactorGraph& workingSet, const VectorValues& xk,
const VectorValues& p) const; const VectorValues& p) const;
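For reference, this is the standard active-set ratio test (cf. Nocedal & Wright; the exact form used in this commit is an assumption): with inequalities a_j' x <= b_j that are not in the working set W,

  \alpha = \min\Bigl(1,\ \min_{j \notin \mathcal{W},\; a_j^{\top} p > 0} \frac{b_j - a_j^{\top} x_k}{a_j^{\top} p}\Bigr)

and the returned (factorIx, sigmaIx) identify the blocking constraint attaining the minimum, which enters the working set in the next iteration.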
/** Iterate 1 step, modify workingGraph and currentSolution *IN PLACE* !!! */ /** Iterate 1 step, return a new state with a new workingSet and values */
bool iterateInPlace(GaussianFactorGraph& workingGraph, QPState iterate(const QPState& state) const;
VectorValues& currentSolution, VectorValues& lambdas) const;
/** Optimize with a provided initial values /** Optimize with a provided initial values
* For this version, it is the responsibility of the caller to provide * For this version, it is the responsibility of the caller to provide
@ -161,27 +192,27 @@ public:
std::pair<VectorValues, VectorValues> optimize() const; std::pair<VectorValues, VectorValues> optimize() const;
/** /**
* Create initial values for the LP subproblem * find the max key
* @return initial values and the key for the first slack variable
*/ */
std::pair<VectorValues, Key> initialValuesLP() const; std::pair<bool, Key> maxKey(const FastSet<Key>& keys) const;
/**
* Create initial values for the LP subproblem
* @return initial values and the key for the first and last slack variables
*/
boost::tuple<VectorValues, Key, Key> initialValuesLP() const;
/// Create coefficients for the LP subproblem's objective function as the sum of slack var /// Create coefficients for the LP subproblem's objective function as the sum of slack var
VectorValues objectiveCoeffsLP(Key firstSlackKey) const; VectorValues objectiveCoeffsLP(Key firstSlackKey) const;
/// Build constraints and slacks' lower bounds for the LP subproblem /// Build constraints and slacks' lower bounds for the LP subproblem
std::pair<GaussianFactorGraph::shared_ptr, VectorValues> constraintsLP( boost::tuple<LinearEqualityFactorGraph::shared_ptr,
LinearInequalityFactorGraph::shared_ptr, VectorValues> constraintsLP(
Key firstSlackKey) const; Key firstSlackKey) const;
/// Find a feasible initial point /// Find a feasible initial point
std::pair<bool, VectorValues> findFeasibleInitialValues() const; std::pair<bool, VectorValues> findFeasibleInitialValues() const;
private:
/// Collect all free Hessians involving constrained variables into a graph
void findUnconstrainedHessiansOfConstrainedVars(
const std::set<Key>& constrainedVars);
}; };
} /* namespace gtsam */ } /* namespace gtsam */

@ -31,14 +31,14 @@ using namespace gtsam::symbol_shorthand;
/* ************************************************************************* */ /* ************************************************************************* */
// Create test graph according to Forst10book_pg171Ex5 // Create test graph according to Forst10book_pg171Ex5
GaussianFactorGraph createTestCase() { QP createTestCase() {
GaussianFactorGraph graph; QP qp;
// Objective function: x1^2 - x1*x2 + x2^2 - 3*x1 + 5 // Objective function: x1^2 - x1*x2 + x2^2 - 3*x1 + 5
// Note the Hessian encodes: // Note the Hessian encodes:
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
// Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10 // Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10
graph.push_back( qp.cost.push_back(
HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1), HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1),
2.0 * ones(1, 1), zero(1), 10.0)); 2.0 * ones(1, 1), zero(1), 10.0));
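As a quick check of the comment above: matching x1^2 - x1*x2 + x2^2 - 3*x1 + 5 against 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f term by term gives

  \tfrac{1}{2}G_{11} = 1 \Rightarrow G_{11} = 2,\quad G_{12} = -1,\quad \tfrac{1}{2}G_{22} = 1 \Rightarrow G_{22} = 2,\quad g_1 = 3,\quad g_2 = 0,\quad \tfrac{1}{2}f = 5 \Rightarrow f = 10,

which are exactly the values passed to the HessianFactor.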
@ -48,12 +48,9 @@ GaussianFactorGraph createTestCase() {
Matrix A1 = (Matrix(4, 1) << 1, -1, 0, 1); Matrix A1 = (Matrix(4, 1) << 1, -1, 0, 1);
Matrix A2 = (Matrix(4, 1) << 1, 0, -1, 0); Matrix A2 = (Matrix(4, 1) << 1, 0, -1, 0);
Vector b = (Vector(4) << 2, 0, 0, 1.5); Vector b = (Vector(4) << 2, 0, 0, 1.5);
// Special constrained noise model denoting <= inequalities with negative sigmas qp.inequalities.push_back(LinearInequality(X(1), A1, X(2), A2, b, 0));
noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(4) << -1, -1, -1, -1));
graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise));
return graph; return qp;
} }
TEST(QPSolver, TestCase) { TEST(QPSolver, TestCase) {
@ -61,49 +58,43 @@ TEST(QPSolver, TestCase) {
double x1 = 5, x2 = 7; double x1 = 5, x2 = 7;
values.insert(X(1), x1 * ones(1, 1)); values.insert(X(1), x1 * ones(1, 1));
values.insert(X(2), x2 * ones(1, 1)); values.insert(X(2), x2 * ones(1, 1));
GaussianFactorGraph graph = createTestCase(); QP qp = createTestCase();
DOUBLES_EQUAL(29, x1*x1 - x1*x2 + x2*x2 - 3*x1 + 5, 1e-9); DOUBLES_EQUAL(29, x1 * x1 - x1 * x2 + x2 * x2 - 3 * x1 + 5, 1e-9);
DOUBLES_EQUAL(29, graph[0]->error(values), 1e-9); DOUBLES_EQUAL(29, qp.cost[0]->error(values), 1e-9);
} }
TEST(QPSolver, constraintsAux) { TEST(QPSolver, constraintsAux) {
GaussianFactorGraph graph = createTestCase(); QP qp = createTestCase();
QPSolver solver(graph); QPSolver solver(qp);
FastVector<size_t> constraintIx = solver.constraintIndices();
LONGS_EQUAL(1, constraintIx.size());
LONGS_EQUAL(1, constraintIx[0]);
VectorValues lambdas; VectorValues lambdas;
lambdas.insert(constraintIx[0], (Vector(4) << -0.5, 0.0, 0.3, 0.1)); lambdas.insert(0, (Vector(4) << -0.5, 0.0, 0.3, 0.1));
int factorIx, lambdaIx; int factorIx, lambdaIx;
boost::tie(factorIx, lambdaIx) = solver.identifyLeavingConstraint(lambdas); boost::tie(factorIx, lambdaIx) = solver.identifyLeavingConstraint(
qp.inequalities, lambdas);
LONGS_EQUAL(1, factorIx); LONGS_EQUAL(1, factorIx);
LONGS_EQUAL(2, lambdaIx); LONGS_EQUAL(2, lambdaIx);
VectorValues lambdas2; VectorValues lambdas2;
lambdas2.insert(constraintIx[0], (Vector(4) << -0.5, 0.0, -0.3, -0.1)); lambdas2.insert(0, (Vector(4) << -0.5, 0.0, -0.3, -0.1));
int factorIx2, lambdaIx2; int factorIx2, lambdaIx2;
boost::tie(factorIx2, lambdaIx2) = solver.identifyLeavingConstraint( boost::tie(factorIx2, lambdaIx2) = solver.identifyLeavingConstraint(
lambdas2); qp.inequalities, lambdas2);
LONGS_EQUAL(-1, factorIx2); LONGS_EQUAL(-1, factorIx2);
LONGS_EQUAL(-1, lambdaIx2); LONGS_EQUAL(-1, lambdaIx2);
HessianFactor expectedFreeHessian(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1),
3.0 * ones(1), 2.0 * ones(1, 1), zero(1), 1.0);
EXPECT(solver.freeHessiansOfConstrainedVars()[0]->equals(expectedFreeHessian));
} }
/* ************************************************************************* */ /* ************************************************************************* */
// Create a simple test graph with one equality constraint // Create a simple test graph with one equality constraint
GaussianFactorGraph createEqualityConstrainedTest() { QP createEqualityConstrainedTest() {
GaussianFactorGraph graph; QP qp;
// Objective function: x1^2 + x2^2 // Objective function: x1^2 + x2^2
// Note the Hessian encodes: // Note the Hessian encodes:
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
// Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0 // Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0
graph.push_back( qp.cost.push_back(
HessianFactor(X(1), X(2), 2.0 * ones(1, 1), zeros(1, 1), zero(1), HessianFactor(X(1), X(2), 2.0 * ones(1, 1), zeros(1, 1), zero(1),
2.0 * ones(1, 1), zero(1), 0.0)); 2.0 * ones(1, 1), zero(1), 0.0));
@ -112,26 +103,24 @@ GaussianFactorGraph createEqualityConstrainedTest() {
Matrix A1 = (Matrix(1, 1) << 1); Matrix A1 = (Matrix(1, 1) << 1);
Matrix A2 = (Matrix(1, 1) << 1); Matrix A2 = (Matrix(1, 1) << 1);
Vector b = -(Vector(1) << 1); Vector b = -(Vector(1) << 1);
// Special constrained noise model denoting <= inequalities with negative sigmas qp.equalities.push_back(LinearEquality(X(1), A1, X(2), A2, b, 0));
noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(1) << 0.0));
graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise));
return graph; return qp;
} }
TEST(QPSolver, dual) { TEST(QPSolver, dual) {
GaussianFactorGraph graph = createEqualityConstrainedTest(); QP qp = createEqualityConstrainedTest();
// Initial values // Initial values
VectorValues initialValues; VectorValues initialValues;
initialValues.insert(X(1), ones(1)); initialValues.insert(X(1), ones(1));
initialValues.insert(X(2), ones(1)); initialValues.insert(X(2), ones(1));
QPSolver solver(graph); QPSolver solver(qp);
GaussianFactorGraph dualGraph = solver.buildDualGraph(graph, initialValues); GaussianFactorGraph::shared_ptr dualGraph = solver.buildDualGraph(
VectorValues dual = dualGraph.optimize(); qp.inequalities, initialValues);
VectorValues dual = dualGraph->optimize();
VectorValues expectedDual; VectorValues expectedDual;
expectedDual.insert(1, (Vector(1) << 2.0)); expectedDual.insert(1, (Vector(1) << 2.0));
CHECK(assert_equal(expectedDual, dual, 1e-10)); CHECK(assert_equal(expectedDual, dual, 1e-10));
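A quick sanity check of that expected multiplier: at the initial point (1, 1) the cost gradient is (2*x1, 2*x2) = (2, 2) and the constraint Jacobian is A = [1 1], so stationarity gives

  \nabla f(1,1) = \begin{pmatrix} 2 \\ 2 \end{pmatrix} = A^{\top}\lambda = \begin{pmatrix} 1 \\ 1 \end{pmatrix}\lambda \;\Rightarrow\; \lambda = 2,

matching expectedDual.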
@ -139,39 +128,36 @@ TEST(QPSolver, dual) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(QPSolver, iterate) { //TEST(QPSolver, iterate) {
GaussianFactorGraph graph = createTestCase(); // QP qp = createTestCase();
QPSolver solver(graph); // QPSolver solver(qp);
//
GaussianFactorGraph workingGraph = graph.clone(); // VectorValues currentSolution;
// currentSolution.insert(X(1), zero(1));
VectorValues currentSolution; // currentSolution.insert(X(2), zero(1));
currentSolution.insert(X(1), zero(1)); //
currentSolution.insert(X(2), zero(1)); // std::vector<VectorValues> expectedSolutions(3);
// expectedSolutions[0].insert(X(1), (Vector(1) << 4.0 / 3.0));
std::vector<VectorValues> expectedSolutions(3); // expectedSolutions[0].insert(X(2), (Vector(1) << 2.0 / 3.0));
expectedSolutions[0].insert(X(1), (Vector(1) << 4.0 / 3.0)); // expectedSolutions[1].insert(X(1), (Vector(1) << 1.5));
expectedSolutions[0].insert(X(2), (Vector(1) << 2.0 / 3.0)); // expectedSolutions[1].insert(X(2), (Vector(1) << 0.5));
expectedSolutions[1].insert(X(1), (Vector(1) << 1.5)); // expectedSolutions[2].insert(X(1), (Vector(1) << 1.5));
expectedSolutions[1].insert(X(2), (Vector(1) << 0.5)); // expectedSolutions[2].insert(X(2), (Vector(1) << 0.5));
expectedSolutions[2].insert(X(1), (Vector(1) << 1.5)); //
expectedSolutions[2].insert(X(2), (Vector(1) << 0.5)); // bool converged = false;
// int it = 0;
bool converged = false; // while (!converged) {
int it = 0; // VectorValues lambdas;
while (!converged) { // converged = solver.iterateInPlace(workingGraph, currentSolution, lambdas);
VectorValues lambdas; // CHECK(assert_equal(expectedSolutions[it], currentSolution, 1e-100));
converged = solver.iterateInPlace(workingGraph, currentSolution, lambdas); // it++;
CHECK(assert_equal(expectedSolutions[it], currentSolution, 1e-100)); // }
it++; //}
}
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST(QPSolver, optimizeForst10book_pg171Ex5) { TEST(QPSolver, optimizeForst10book_pg171Ex5) {
GaussianFactorGraph graph = createTestCase(); QP qp = createTestCase();
QPSolver solver(graph); QPSolver solver(qp);
VectorValues initialValues; VectorValues initialValues;
initialValues.insert(X(1), zero(1)); initialValues.insert(X(1), zero(1));
initialValues.insert(X(2), zero(1)); initialValues.insert(X(2), zero(1));
@ -185,14 +171,14 @@ TEST(QPSolver, optimizeForst10book_pg171Ex5) {
/* ************************************************************************* */ /* ************************************************************************* */
// Create Matlab's test graph as in http://www.mathworks.com/help/optim/ug/quadprog.html // Create Matlab's test graph as in http://www.mathworks.com/help/optim/ug/quadprog.html
GaussianFactorGraph createTestMatlabQPEx() { QP createTestMatlabQPEx() {
GaussianFactorGraph graph; QP qp;
// Objective function: 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 - 6*x2 // Objective function: 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 - 6*x2
// Note the Hessian encodes: // Note the Hessian encodes:
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
// Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0 // Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0
graph.push_back( qp.cost.push_back(
HessianFactor(X(1), X(2), 1.0 * ones(1, 1), -ones(1, 1), 2.0 * ones(1), HessianFactor(X(1), X(2), 1.0 * ones(1, 1), -ones(1, 1), 2.0 * ones(1),
2.0 * ones(1, 1), 6 * ones(1), 1000.0)); 2.0 * ones(1, 1), 6 * ones(1), 1000.0));
@ -202,17 +188,14 @@ GaussianFactorGraph createTestMatlabQPEx() {
Matrix A1 = (Matrix(5, 1) << 1, -1, 2, -1, 0); Matrix A1 = (Matrix(5, 1) << 1, -1, 2, -1, 0);
Matrix A2 = (Matrix(5, 1) << 1, 2, 1, 0, -1); Matrix A2 = (Matrix(5, 1) << 1, 2, 1, 0, -1);
Vector b = (Vector(5) << 2, 2, 3, 0, 0); Vector b = (Vector(5) << 2, 2, 3, 0, 0);
// Special constrained noise model denoting <= inequalities with negative sigmas qp.inequalities.push_back(LinearInequality(X(1), A1, X(2), A2, b, 0));
noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(5) << -1, -1, -1, -1, -1));
graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise));
return graph; return qp;
} }
TEST(QPSolver, optimizeMatlabEx) { TEST(QPSolver, optimizeMatlabEx) {
GaussianFactorGraph graph = createTestMatlabQPEx(); QP qp = createTestMatlabQPEx();
QPSolver solver(graph); QPSolver solver(qp);
VectorValues initialValues; VectorValues initialValues;
initialValues.insert(X(1), zero(1)); initialValues.insert(X(1), zero(1));
initialValues.insert(X(2), zero(1)); initialValues.insert(X(2), zero(1));
@ -226,33 +209,30 @@ TEST(QPSolver, optimizeMatlabEx) {
/* ************************************************************************* */ /* ************************************************************************* */
// Create test graph as in Nocedal06book, Ex 16.4, pg. 475 // Create test graph as in Nocedal06book, Ex 16.4, pg. 475
GaussianFactorGraph createTestNocedal06bookEx16_4() { QP createTestNocedal06bookEx16_4() {
GaussianFactorGraph graph; QP qp;
graph.push_back(JacobianFactor(X(1), ones(1, 1), ones(1))); qp.cost.push_back(JacobianFactor(X(1), ones(1, 1), ones(1)));
graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1))); qp.cost.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));
// Inequality constraints // Inequality constraints
noiseModel::Constrained::shared_ptr noise = qp.inequalities.push_back(
noiseModel::Constrained::MixedSigmas((Vector(1) << -1)); LinearInequality(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), 2 * ones(1),
graph.push_back( 0));
JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), 2 * ones(1), qp.inequalities.push_back(
noise)); LinearInequality(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), 1));
graph.push_back( qp.inequalities.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), LinearInequality(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1),
noise)); 2));
graph.push_back( qp.inequalities.push_back(LinearInequality(X(1), -ones(1, 1), zero(1), 3));
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1), qp.inequalities.push_back(LinearInequality(X(2), -ones(1, 1), zero(1), 4));
noise));
graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise));
graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise));
return graph; return qp;
} }
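Reading the factors back (up to the factor of 1/2 in a GaussianFactor's error), this is Nocedal & Wright's Example 16.4:

  q(x) = (x_1 - 1)^2 + (x_2 - 2.5)^2, \qquad
  -x_1 + 2x_2 \le 2,\quad x_1 + 2x_2 \le 6,\quad x_1 - 2x_2 \le 2,\quad -x_1 \le 0,\quad -x_2 \le 0,

equivalently x_1 - 2x_2 + 2 >= 0, -x_1 - 2x_2 + 6 >= 0, -x_1 + 2x_2 + 2 >= 0, x_1 >= 0, x_2 >= 0. The trailing integer passed to each LinearInequality is presumably its dual key.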
TEST(QPSolver, optimizeNocedal06bookEx16_4) { TEST(QPSolver, optimizeNocedal06bookEx16_4) {
GaussianFactorGraph graph = createTestNocedal06bookEx16_4(); QP qp = createTestNocedal06bookEx16_4();
QPSolver solver(graph); QPSolver solver(qp);
VectorValues initialValues; VectorValues initialValues;
initialValues.insert(X(1), (Vector(1) << 2.0)); initialValues.insert(X(1), (Vector(1) << 2.0));
initialValues.insert(X(2), zero(1)); initialValues.insert(X(2), zero(1));
@ -288,88 +268,87 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4) {
2.0000 2.0000
0.5000 0.5000
*/ */
GaussianFactorGraph modifyNocedal06bookEx16_4() { QP modifyNocedal06bookEx16_4() {
GaussianFactorGraph graph; QP qp;
graph.push_back(JacobianFactor(X(1), ones(1, 1), ones(1))); qp.cost.push_back(JacobianFactor(X(1), ones(1, 1), ones(1)));
graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1))); qp.cost.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));
// Inequality constraints // Inequality constraints
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(1) << -1)); noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
graph.push_back( qp.inequalities.push_back(
JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), -1 * ones(1), LinearInequality(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), -1 * ones(1),
noise)); 0));
graph.push_back( qp.inequalities.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), LinearInequality(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), 1));
noise)); qp.inequalities.push_back(
graph.push_back( LinearInequality(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1),
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1), 2));
noise)); qp.inequalities.push_back(LinearInequality(X(1), -ones(1, 1), zero(1), 3));
graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise)); qp.inequalities.push_back(LinearInequality(X(2), -ones(1, 1), zero(1), 4));
graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise));
return graph; return qp;
} }
TEST(QPSolver, optimizeNocedal06bookEx16_4_findInitialPoint) { TEST(QPSolver, optimizeNocedal06bookEx16_4_findInitialPoint) {
GaussianFactorGraph graph = modifyNocedal06bookEx16_4(); QP qp = modifyNocedal06bookEx16_4();
QPSolver solver(graph); QPSolver solver(qp);
VectorValues initialsLP; VectorValues initialsLP;
Key firstSlackKey; Key firstSlackKey, lastSlackKey;
boost::tie(initialsLP, firstSlackKey) = solver.initialValuesLP(); boost::tie(initialsLP, firstSlackKey, lastSlackKey) = solver.initialValuesLP();
EXPECT(assert_equal(zero(1), initialsLP.at(X(1)))); EXPECT(assert_equal(zero(1), initialsLP.at(X(1))));
EXPECT(assert_equal(zero(1), initialsLP.at(X(2)))); EXPECT(assert_equal(zero(1), initialsLP.at(X(2))));
LONGS_EQUAL(X(2)+1, firstSlackKey); LONGS_EQUAL(X(2) + 1, firstSlackKey);
EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey))); EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey)));
EXPECT(assert_equal(ones(1)*6.0, initialsLP.at(firstSlackKey+1))); EXPECT(assert_equal(ones(1) * 6.0, initialsLP.at(firstSlackKey + 1)));
EXPECT(assert_equal(ones(1)*2.0, initialsLP.at(firstSlackKey+2))); EXPECT(assert_equal(ones(1) * 2.0, initialsLP.at(firstSlackKey + 2)));
EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey+3))); EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey + 3)));
EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey+4))); EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey + 4)));
VectorValues objCoeffs = solver.objectiveCoeffsLP(firstSlackKey); VectorValues objCoeffs = solver.objectiveCoeffsLP(firstSlackKey);
for (size_t i = 0; i < 5; ++i) for (size_t i = 0; i < 5; ++i)
EXPECT(assert_equal(ones(1), objCoeffs.at(firstSlackKey+i))); EXPECT(assert_equal(ones(1), objCoeffs.at(firstSlackKey + i)));
GaussianFactorGraph::shared_ptr constraints; LinearEqualityFactorGraph::shared_ptr equalities;
LinearInequalityFactorGraph::shared_ptr inequalities;
VectorValues lowerBounds; VectorValues lowerBounds;
boost::tie(constraints, lowerBounds) = solver.constraintsLP(firstSlackKey); boost::tie(equalities, inequalities, lowerBounds) = solver.constraintsLP(
firstSlackKey);
for (size_t i = 0; i < 5; ++i) for (size_t i = 0; i < 5; ++i)
EXPECT(assert_equal(zero(1), lowerBounds.at(firstSlackKey+i))); EXPECT(assert_equal(zero(1), lowerBounds.at(firstSlackKey + i)));
GaussianFactorGraph expectedConstraints; LinearInequalityFactorGraph expectedInequalities;
noiseModel::Constrained::shared_ptr noise = expectedInequalities.push_back(
noiseModel::Constrained::MixedSigmas((Vector(1) << -1)); LinearInequality(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), X(3),
expectedConstraints.push_back( -ones(1, 1), -1 * ones(1), 0));
JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), X(3), -ones(1, 1), expectedInequalities.push_back(
-1 * ones(1), noise)); LinearInequality(X(1), ones(1, 1), X(2), 2 * ones(1, 1), X(4),
expectedConstraints.push_back( -ones(1, 1), 6 * ones(1), 1));
JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), X(4), -ones(1, 1), expectedInequalities.push_back(
6 * ones(1), noise)); LinearInequality(X(1), ones(1, 1), X(2), -2 * ones(1, 1), X(5),
expectedConstraints.push_back( -ones(1, 1), 2 * ones(1), 2));
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), X(5), -ones(1, 1), expectedInequalities.push_back(
2 * ones(1), noise)); LinearInequality(X(1), -ones(1, 1), X(6), -ones(1, 1), zero(1), 3));
expectedConstraints.push_back( expectedInequalities.push_back(
JacobianFactor(X(1), -ones(1, 1), X(6), -ones(1, 1), zero(1), noise)); LinearInequality(X(2), -ones(1, 1), X(7), -ones(1, 1), zero(1), 4));
expectedConstraints.push_back( EXPECT(assert_equal(expectedInequalities, *inequalities));
JacobianFactor(X(2), -ones(1, 1), X(7), -ones(1, 1), zero(1), noise));
EXPECT(assert_equal(expectedConstraints, *constraints));
bool isFeasible; bool isFeasible;
VectorValues initialValues; VectorValues initialValues;
boost::tie(isFeasible, initialValues) = solver.findFeasibleInitialValues(); boost::tie(isFeasible, initialValues) = solver.findFeasibleInitialValues();
EXPECT(assert_equal(1.0*ones(1), initialValues.at(X(1)))); EXPECT(assert_equal(1.0 * ones(1), initialValues.at(X(1))));
EXPECT(assert_equal(0.0*ones(1), initialValues.at(X(2)))); EXPECT(assert_equal(0.0 * ones(1), initialValues.at(X(2))));
VectorValues solution; VectorValues solution;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(); boost::tie(solution, boost::tuples::ignore) = solver.optimize();
EXPECT(assert_equal(2.0*ones(1), solution.at(X(1)))); EXPECT(assert_equal(2.0 * ones(1), solution.at(X(1))));
EXPECT(assert_equal(0.5*ones(1), solution.at(X(2)))); EXPECT(assert_equal(0.5 * ones(1), solution.at(X(2))));
} }
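The slack construction exercised above is the usual Phase-I feasibility LP: each inequality a_k' x <= b_k gets a slack s_k (keys X(3)..X(7) here, with unit objective coefficients and zero lower bounds), and the subproblem solved to find an initial point is, consistent with the expectedInequalities and lowerBounds checked above,

  \min_{x,\,s}\ \sum_k s_k \quad \text{s.t.} \quad a_k^{\top} x - s_k \le b_k,\quad s_k \ge 0 .

The original problem is feasible exactly when the optimal total slack is (numerically) zero, which is what the slackSum < 1e-5 test in findFeasibleInitialValues checks.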
TEST(QPSolver, optimizeNocedal06bookEx16_4_2) { TEST(QPSolver, optimizeNocedal06bookEx16_4_2) {
GaussianFactorGraph graph = createTestNocedal06bookEx16_4(); QP qp = createTestNocedal06bookEx16_4();
QPSolver solver(graph); QPSolver solver(qp);
VectorValues initialValues; VectorValues initialValues;
initialValues.insert(X(1), (Vector(1) << 0.0)); initialValues.insert(X(1), (Vector(1) << 0.0));
initialValues.insert(X(2), (Vector(1) << 100.0)); initialValues.insert(X(2), (Vector(1) << 100.0));
@ -391,17 +370,16 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4_2) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(QPSolver, failedSubproblem) { TEST(QPSolver, failedSubproblem) {
GaussianFactorGraph graph; QP qp;
graph.push_back(JacobianFactor(X(1), eye(2), zero(2))); qp.cost.push_back(JacobianFactor(X(1), eye(2), zero(2)));
graph.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0)); qp.cost.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0));
graph.push_back( qp.inequalities.push_back(
JacobianFactor(X(1), (Matrix(1, 2) << -1.0, 0.0), -ones(1), LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0), -ones(1), 0));
noiseModel::Constrained::MixedSigmas(-ones(1))));
VectorValues expected; VectorValues expected;
expected.insert(X(1), (Vector(2) << 1.0, 0.0)); expected.insert(X(1), (Vector(2) << 1.0, 0.0));
QPSolver solver(graph); QPSolver solver(qp);
VectorValues solution; VectorValues solution;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(); boost::tie(solution, boost::tuples::ignore) = solver.optimize();
// graph.print("Graph: "); // graph.print("Graph: ");