diff --git a/gtsam_unstable/linear/LPSolver.cpp b/gtsam_unstable/linear/LPSolver.cpp index e12fa477e..4c85a1f5c 100644 --- a/gtsam_unstable/linear/LPSolver.cpp +++ b/gtsam_unstable/linear/LPSolver.cpp @@ -28,21 +28,30 @@ void LPSolver::buildMetaInformation() { freeVars_.insert(key); } // Now collect remaining keys in constraints - VariableIndex factorIndex(*constraints_); + VariableIndex factorIndex(*equalities_); BOOST_FOREACH(Key key, factorIndex | boost::adaptors::map_keys) { if (!variableColumnNo_.count(key)) { - JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast< - JacobianFactor>(constraints_->at(*factorIndex[key].begin())); - if (!jacobian || !jacobian->isConstrained()) { - throw runtime_error("Invalid constrained graph!"); - } - size_t dim = jacobian->getDim(jacobian->find(key)); + LinearEquality::shared_ptr factor = equalities_->at(*factorIndex[key].begin()); + size_t dim = factor->getDim(factor->find(key)); variableColumnNo_.insert(make_pair(key, firstVarIndex)); variableDims_.insert(make_pair(key, dim)); firstVarIndex += variableDims_[key]; freeVars_.insert(key); } } + + VariableIndex factorIndex2(*inequalities_); + BOOST_FOREACH(Key key, factorIndex2 | boost::adaptors::map_keys) { + if (!variableColumnNo_.count(key)) { + LinearInequality::shared_ptr factor = inequalities_->at(*factorIndex2[key].begin()); + size_t dim = factor->getDim(factor->find(key)); + variableColumnNo_.insert(make_pair(key, firstVarIndex)); + variableDims_.insert(make_pair(key, dim)); + firstVarIndex += variableDims_[key]; + freeVars_.insert(key); + } + } + // Collect the remaining keys in lowerBounds BOOST_FOREACH(Key key, lowerBounds_ | boost::adaptors::map_keys) { if (!variableColumnNo_.count(key)) { @@ -67,7 +76,7 @@ void LPSolver::buildMetaInformation() { /* ************************************************************************* */ void LPSolver::addConstraints(const boost::shared_ptr& lp, - const JacobianFactor::shared_ptr& jacobian) const { + const JacobianFactor::shared_ptr& jacobian, int constraintType) const { if (!jacobian || !jacobian->isConstrained()) throw runtime_error("LP only accepts constrained factors!"); @@ -76,7 +85,6 @@ void LPSolver::addConstraints(const boost::shared_ptr& lp, vector columnNo = buildColumnNo(keys); // Add each row to lp one by one. TODO: is there a faster way? - Vector sigmas = jacobian->get_model()->sigmas(); Matrix A = jacobian->getA(); Vector b = jacobian->getb(); for (int i = 0; i < A.rows(); ++i) { @@ -88,11 +96,6 @@ void LPSolver::addConstraints(const boost::shared_ptr& lp, // so we have to make a new copy for every row!!!!! vector columnNoCopy(columnNo); - if (sigmas[i] > 0) { - cout << "Warning: Ignore Gaussian noise (sigma>0) in LP constraints!" - << endl; - } - int constraintType = (sigmas[i] < 0) ? LE : EQ; if (!add_constraintex(lp.get(), columnNoCopy.size(), r.data(), columnNoCopy.data(), constraintType, b[i])) throw runtime_error("LP can't accept Gaussian noise!"); @@ -132,13 +135,17 @@ boost::shared_ptr LPSolver::buildModel() const { // Makes building the model faster if it is done rows by row set_add_rowmode(lp.get(), TRUE); - // Add constraints - BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, *constraints_) { - JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast< - JacobianFactor>(factor); - addConstraints(lp, jacobian); + // Add equality constraints + BOOST_FOREACH(const LinearEquality::shared_ptr& factor, *equalities_) { + addConstraints(lp, factor, EQ); } + // Add inequality constraints + BOOST_FOREACH(const LinearInequality::shared_ptr& factor, *inequalities_) { + addConstraints(lp, factor, LE); + } + + // Add bounds addBounds(lp); diff --git a/gtsam_unstable/linear/LPSolver.h b/gtsam_unstable/linear/LPSolver.h index 73382fe3f..99bf54450 100644 --- a/gtsam_unstable/linear/LPSolver.h +++ b/gtsam_unstable/linear/LPSolver.h @@ -7,9 +7,10 @@ #pragma once -#include #include #include +#include +#include #include @@ -25,7 +26,8 @@ namespace gtsam { */ class LPSolver { VectorValues objectiveCoeffs_; - GaussianFactorGraph::shared_ptr constraints_; + LinearEqualityFactorGraph::shared_ptr equalities_; + LinearInequalityFactorGraph::shared_ptr inequalities_; VectorValues lowerBounds_, upperBounds_; std::map variableColumnNo_, variableDims_; size_t nrColumns_; @@ -38,11 +40,12 @@ public: * set as unbounded, i.e. -inf <= x <= inf. */ LPSolver(const VectorValues& objectiveCoeffs, - const GaussianFactorGraph::shared_ptr& constraints, + const LinearEqualityFactorGraph::shared_ptr& equalities, + const LinearInequalityFactorGraph::shared_ptr& inequalities, const VectorValues& lowerBounds = VectorValues(), const VectorValues& upperBounds = VectorValues()) : - objectiveCoeffs_(objectiveCoeffs), constraints_(constraints), lowerBounds_( - lowerBounds), upperBounds_(upperBounds) { + objectiveCoeffs_(objectiveCoeffs), equalities_(equalities), inequalities_( + inequalities), lowerBounds_(lowerBounds), upperBounds_(upperBounds) { buildMetaInformation(); } @@ -73,18 +76,18 @@ public: template std::vector buildColumnNo(const KEYLIST& keyList) const { std::vector columnNo; - BOOST_FOREACH(Key key, keyList) { - std::vector varIndices = boost::copy_range >( - boost::irange(variableColumnNo_.at(key), - variableColumnNo_.at(key) + variableDims_.at(key))); - columnNo.insert(columnNo.end(), varIndices.begin(), varIndices.end()); - } + BOOST_FOREACH(Key key, keyList){ + std::vector varIndices = boost::copy_range >( + boost::irange(variableColumnNo_.at(key), + variableColumnNo_.at(key) + variableDims_.at(key))); + columnNo.insert(columnNo.end(), varIndices.begin(), varIndices.end()); + } return columnNo; } /// Add all [scalar] constraints in a constrained Jacobian factor to lp void addConstraints(const boost::shared_ptr& lp, - const JacobianFactor::shared_ptr& jacobian) const; + const JacobianFactor::shared_ptr& jacobian, int type) const; /** * Add all bounds to lp. diff --git a/gtsam_unstable/linear/LinearEquality.h b/gtsam_unstable/linear/LinearEquality.h new file mode 100644 index 000000000..ba80e16bf --- /dev/null +++ b/gtsam_unstable/linear/LinearEquality.h @@ -0,0 +1,140 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearEquality.h + * @brief: LinearEquality derived from Base with constrained noise model + * @date: Nov 27, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include + +namespace gtsam { + +/** + * This class defines Linear constraints by inherit Base + * with the special Constrained noise model + */ +class LinearEquality: public JacobianFactor { +public: + typedef LinearEquality This; ///< Typedef to this class + typedef JacobianFactor Base; ///< Typedef to base class + typedef boost::shared_ptr shared_ptr; ///< shared_ptr to this class + +private: + Key dualKey_; + +public: + /** default constructor for I/O */ + LinearEquality() : + Base() { + } + + /** Conversion from HessianFactor (does Cholesky to obtain Jacobian matrix) */ + explicit LinearEquality(const HessianFactor& hf) { + throw std::runtime_error("Cannot convert HessianFactor to LinearEquality"); + } + + /** Construct unary factor */ + LinearEquality(Key i1, const Matrix& A1, const Vector& b, Key dualKey) : + Base(i1, A1, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct binary factor */ + LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, + const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, b, noiseModel::Constrained::All(b.rows())), dualKey_( + dualKey) { + } + + /** Construct ternary factor */ + LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, b, noiseModel::Constrained::All(b.rows())), dualKey_( + dualKey) { + } + + /** Construct four-ary factor */ + LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, Key i4, const Matrix& A4, const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, i4, A4, b, + noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct five-ary factor */ + LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5, + const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, b, + noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct six-ary factor */ + LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5, + Key i6, const Matrix& A6, const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, i6, A6, b, + noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct an n-ary factor + * @tparam TERMS A container whose value type is std::pair, specifying the + * collection of keys and matrices making up the factor. */ + template + LinearEquality(const TERMS& terms, const Vector& b, Key dualKey) : + Base(terms, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Virtual destructor */ + virtual ~LinearEquality() { + } + + /** equals */ + virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const { + return Base::equals(lf, tol); + } + + /** print */ + virtual void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const { + Base::print(s, formatter); + } + + /** Clone this LinearEquality */ + virtual GaussianFactor::shared_ptr clone() const { + return boost::static_pointer_cast( + boost::make_shared(*this)); + } + + /// dual key + Key dualKey() const { return dualKey_; } + + /** Special error_vector for constraints (A*x-b) */ + Vector error_vector(const VectorValues& c) const { + return unweighted_error(c); + } + + /** Special error for constraints. + * I think it should be zero, as this function is meant for objective cost. + * But the name "error" can be misleading. + * TODO: confirm with Frank!! */ + virtual double error(const VectorValues& c) const { + return 0.0; + } + +}; +// LinearEquality + +}// gtsam + diff --git a/gtsam_unstable/linear/LinearEqualityFactorGraph.h b/gtsam_unstable/linear/LinearEqualityFactorGraph.h new file mode 100644 index 000000000..fd9191250 --- /dev/null +++ b/gtsam_unstable/linear/LinearEqualityFactorGraph.h @@ -0,0 +1,32 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearEqualityFactorGraph.h + * @brief: Factor graph of all LinearEquality factors + * @date: Dec 8, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include +#include + +namespace gtsam { + +class LinearEqualityFactorGraph : public FactorGraph { +public: + typedef boost::shared_ptr shared_ptr; +}; + +} // namespace gtsam + diff --git a/gtsam_unstable/linear/LinearInequality.h b/gtsam_unstable/linear/LinearInequality.h new file mode 100644 index 000000000..0edec2ccb --- /dev/null +++ b/gtsam_unstable/linear/LinearInequality.h @@ -0,0 +1,152 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearInequality.h + * @brief: LinearInequality derived from Base with constrained noise model + * @date: Nov 27, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include + +namespace gtsam { + +/** + * This class defines Linear constraints by inherit Base + * with the special Constrained noise model + */ +class LinearInequality: public JacobianFactor { +public: + typedef LinearInequality This; ///< Typedef to this class + typedef JacobianFactor Base; ///< Typedef to base class + typedef boost::shared_ptr shared_ptr; ///< shared_ptr to this class + +private: + Key dualKey_; + +public: + /** default constructor for I/O */ + LinearInequality() : + Base() { + } + + /** Conversion from HessianFactor (does Cholesky to obtain Jacobian matrix) */ + explicit LinearInequality(const HessianFactor& hf) { + throw std::runtime_error( + "Cannot convert HessianFactor to LinearInequality"); + } + + /** Construct unary factor */ + LinearInequality(Key i1, const Matrix& A1, const Vector& b, Key dualKey) : + Base(i1, A1, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct binary factor */ + LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, + const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, b, noiseModel::Constrained::All(b.rows())), dualKey_( + dualKey) { + } + + /** Construct ternary factor */ + LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, b, noiseModel::Constrained::All(b.rows())), dualKey_( + dualKey) { + } + + /** Construct four-ary factor */ + LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, Key i4, const Matrix& A4, const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, i4, A4, b, + noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct five-ary factor */ + LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5, + const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, b, + noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct six-ary factor */ + LinearInequality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, Key i4, const Matrix& A4, Key i5, const Matrix& A5, + Key i6, const Matrix& A6, const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, i4, A4, i5, A5, i6, A6, b, + noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct an n-ary factor + * @tparam TERMS A container whose value type is std::pair, specifying the + * collection of keys and matrices making up the factor. */ + template + LinearInequality(const TERMS& terms, const Vector& b, Key dualKey) : + Base(terms, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Virtual destructor */ + virtual ~LinearInequality() { + } + + /** equals */ + virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const { + return Base::equals(lf, tol); + } + + /** print */ + virtual void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const { + Base::print(s, formatter); + } + + /** Clone this LinearInequality */ + virtual GaussianFactor::shared_ptr clone() const { + return boost::static_pointer_cast( + boost::make_shared(*this)); + } + + /// dual key + Key dualKey() const { return dualKey_; } + + /** Special error_vector for constraints (A*x-b) */ + Vector error_vector(const VectorValues& c) const { + return unweighted_error(c); + } + + /** Special error for constraints. + * I think it should be zero, as this function is meant for objective cost. + * But the name "error" can be misleading. + * TODO: confirm with Frank!! */ + virtual double error(const VectorValues& c) const { + return 0.0; + } + + /** dot product of row s with the corresponding vector in p */ + double dotProductRow(size_t s, const VectorValues& p) const { + double ajTp = 0.0; + for (const_iterator xj = begin(); xj != end(); ++xj) { + Vector pj = p.at(*xj); + Vector aj = getA(xj).row(s); + ajTp += aj.dot(pj); + } + return ajTp; + } + +}; +// LinearInequality + +}// gtsam + diff --git a/gtsam_unstable/linear/LinearInequalityFactorGraph.h b/gtsam_unstable/linear/LinearInequalityFactorGraph.h new file mode 100644 index 000000000..bfc64df2a --- /dev/null +++ b/gtsam_unstable/linear/LinearInequalityFactorGraph.h @@ -0,0 +1,48 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearInequalityFactorGraph.h + * @brief: Factor graph of all LinearInequality factors + * @date: Dec 8, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +class LinearInequalityFactorGraph: public FactorGraph { +private: + typedef FactorGraph Base; + +public: + typedef boost::shared_ptr shared_ptr; + + /** print */ + void print(const std::string& str, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const { + Base::print(str, keyFormatter); + } + + /** equals */ + bool equals(const LinearInequalityFactorGraph& other, + double tol = 1e-9) const { + return Base::equals(other, tol); + } +}; + +} // namespace gtsam + diff --git a/gtsam_unstable/linear/QP.h b/gtsam_unstable/linear/QP.h new file mode 100644 index 000000000..111ab506f --- /dev/null +++ b/gtsam_unstable/linear/QP.h @@ -0,0 +1,58 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * QP.h + * @brief: Factor graphs of a Quadratic Programming problem + * @date: Dec 8, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +/** + * struct contains factor graphs of a Quadratic Programming problem + */ +struct QP { + GaussianFactorGraph cost; //!< Quadratic cost factors + LinearEqualityFactorGraph equalities; //!< linear equality constraints + LinearInequalityFactorGraph inequalities; //!< linear inequality constraints + + /** default constructor */ + QP() : + cost(), equalities(), inequalities() { + } + + /** constructor */ + QP(const GaussianFactorGraph& _cost, + const LinearEqualityFactorGraph& _linearEqualities, + const LinearInequalityFactorGraph& _linearInequalities) : + cost(_cost), equalities(_linearEqualities), inequalities( + _linearInequalities) { + } + + /** print */ + void print(const std::string& s = "") { + std::cout << s << std::endl; + cost.print("Quadratic cost factors: "); + equalities.print("Linear equality factors: "); + inequalities.print("Linear inequality factors: "); + } +}; + +} // namespace gtsam + diff --git a/gtsam_unstable/linear/QPSolver.cpp b/gtsam_unstable/linear/QPSolver.cpp index ac6bc549c..7872ce27e 100644 --- a/gtsam_unstable/linear/QPSolver.cpp +++ b/gtsam_unstable/linear/QPSolver.cpp @@ -6,255 +6,85 @@ */ #include +#include #include #include -#include #include using namespace std; +#define ACTIVE 0.0 +#define INACTIVE std::numeric_limits::infinity() + namespace gtsam { -/// Convert a Gaussian factor to a jacobian. return empty shared ptr if failed -static JacobianFactor::shared_ptr toJacobian( - const GaussianFactor::shared_ptr& factor) { - JacobianFactor::shared_ptr jacobian( - boost::dynamic_pointer_cast(factor)); - return jacobian; +//****************************************************************************** +QPSolver::QPSolver(const QP& qp) : qp_(qp) { + baseGraph_ = qp_.cost; + baseGraph_.push_back(qp_.equalities.begin(), qp_.equalities.end()); + costVariableIndex_ = VariableIndex(qp_.cost); + equalityVariableIndex_ = VariableIndex(qp_.equalities); + inequalityVariableIndex_ = VariableIndex(qp_.inequalities); + constrainedKeys_ = qp_.equalities.keys(); + constrainedKeys_.merge(qp_.inequalities.keys()); } //****************************************************************************** -QPSolver::QPSolver(const GaussianFactorGraph& graph) : - graph_(graph), fullFactorIndices_(graph) { - // Split the original graph into unconstrained and constrained part - // and collect indices of constrained factors - for (size_t i = 0; i < graph.nrFactors(); ++i) { - // obtain the factor and its noise model - JacobianFactor::shared_ptr jacobian = toJacobian(graph.at(i)); - if (jacobian && jacobian->get_model() - && jacobian->get_model()->isConstrained()) { - constraintIndices_.push_back(i); - } - } - - // Collect constrained variable keys - set constrainedVars; - BOOST_FOREACH(size_t index, constraintIndices_) { - KeyVector keys = graph.at(index)->keys(); - constrainedVars.insert(keys.begin(), keys.end()); - } - - // Collect unconstrained hessians of constrained vars to build dual graph - findUnconstrainedHessiansOfConstrainedVars(constrainedVars); - freeHessianFactorIndex_ = VariableIndex(freeHessians_); +VectorValues QPSolver::solveWithCurrentWorkingSet( + const LinearInequalityFactorGraph& workingSet) const { + GaussianFactorGraph workingGraph = baseGraph_; + workingGraph.push_back(workingSet); + return workingGraph.optimize(); } //****************************************************************************** -void QPSolver::findUnconstrainedHessiansOfConstrainedVars( - const set& constrainedVars) { - VariableIndex variableIndex(graph_); +JacobianFactor::shared_ptr QPSolver::createDualFactor(Key key, + const LinearInequalityFactorGraph& workingSet, const VectorValues& delta) const { - // Collect all factors involving constrained vars - FastSet factors; - BOOST_FOREACH(Key key, constrainedVars) { - VariableIndex::Factors factorsOfThisVar = variableIndex[key]; - BOOST_FOREACH(size_t factorIndex, factorsOfThisVar) { - factors.insert(factorIndex); - } - } + // Transpose the A matrix of constrained factors to have the jacobian of the dual key + std::vector > Aterms = collectDualJacobians + < LinearEquality > (key, qp_.equalities, equalityVariableIndex_); + std::vector > AtermsInequalities = collectDualJacobians + < LinearInequality > (key, workingSet, inequalityVariableIndex_); + Aterms.insert(Aterms.end(), AtermsInequalities.begin(), + AtermsInequalities.end()); - // Convert each factor into Hessian - BOOST_FOREACH(size_t factorIndex, factors) { - GaussianFactor::shared_ptr gf = graph_[factorIndex]; - if (!gf) - continue; - // See if this is a Jacobian factor - JacobianFactor::shared_ptr jf = // - boost::dynamic_pointer_cast(gf); - if (jf) { - // Dealing with mixed constrained factor - if (jf->get_model() && jf->isConstrained()) { - // Turn a mixed-constrained factor into a factor with 0 information on the constrained part - Vector sigmas = jf->get_model()->sigmas(); - Vector newPrecisions(sigmas.size()); - bool mixed = false; - for (size_t s = 0; s < sigmas.size(); ++s) { - if (sigmas[s] <= 1e-9) - newPrecisions[s] = 0.0; // 0 info for constraints (both inequality and eq) - else { - newPrecisions[s] = 1.0 / sigmas[s]; - mixed = true; - } - } - if (mixed) { // only add free hessians if it's mixed - JacobianFactor::shared_ptr newJacobian = toJacobian(jf->clone()); - newJacobian->setModel( - noiseModel::Diagonal::Precisions(newPrecisions)); - freeHessians_.push_back(HessianFactor(*newJacobian)); - } - } else { // unconstrained Jacobian - // Convert the original linear factor to Hessian factor - // TODO: This may fail and throw the following exception - // Assertion failed: (((!PanelMode) && stride==0 && offset==0) || - // (PanelMode && stride>=depth && offset<=stride)), function operator(), - // file Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h, line 1133. - // because of a weird error which might be related to clang - // See this: https://groups.google.com/forum/#!topic/ceres-solver/DYhqOLPquHU - // My current way to fix this is to compile both gtsam and my library in Release mode - freeHessians_.add(HessianFactor(*jf)); - } - } else { // If it's not a Jacobian, it should be a hessian factor. Just add! - HessianFactor::shared_ptr hf = // - boost::dynamic_pointer_cast(gf); - if (hf) - freeHessians_.push_back(hf); - } + // Collect the gradients of unconstrained cost factors to the b vector + Vector b = zero(delta.at(key).size()); + BOOST_FOREACH(size_t factorIx, costVariableIndex_[key]) { + GaussianFactor::shared_ptr factor = qp_.cost.at(factorIx); + b += factor->gradient(key, delta); } + return boost::make_shared(Aterms, b); } //****************************************************************************** -GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph, - const VectorValues& x0, bool useLeastSquare) const { - static const bool debug = false; - - // The dual graph to return - GaussianFactorGraph dualGraph; - - // For each variable xi involving in some constraint, compute the unconstrained gradient - // wrt xi from the prebuilt freeHessian graph - // \grad f(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi - if (debug) - freeHessianFactorIndex_.print("freeHessianFactorIndex_: "); - BOOST_FOREACH(const VariableIndex::value_type& xiKey_factors, freeHessianFactorIndex_) { - Key xiKey = xiKey_factors.first; - VariableIndex::Factors xiFactors = xiKey_factors.second; - - // Find xi's dim from the first factor on xi - if (xiFactors.size() == 0) - continue; - GaussianFactor::shared_ptr xiFactor0 = freeHessians_.at(*xiFactors.begin()); - size_t xiDim = xiFactor0->getDim(xiFactor0->find(xiKey)); - if (debug) - xiFactor0->print("xiFactor0: "); - if (debug) - cout << "xiKey: " << string(Symbol(xiKey)) << ", xiDim: " << xiDim - << endl; - - //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - // Compute the b-vector for the dual factor Ax-b - // b = gradf(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi - Vector gradf_xi = zero(xiDim); - BOOST_FOREACH(size_t factorIx, xiFactors) { - HessianFactor::shared_ptr factor = freeHessians_.at(factorIx); - Factor::const_iterator xi = factor->find(xiKey); - // Sum over Gij*xj for all xj connecting to xi - for (Factor::const_iterator xj = factor->begin(); xj != factor->end(); - ++xj) { - // Obtain Gij from the Hessian factor - // Hessian factor only stores an upper triangular matrix, so be careful when i>j - Matrix Gij; - if (xi > xj) { - Matrix Gji = factor->info(xj, xi); - Gij = Gji.transpose(); - } else { - Gij = factor->info(xi, xj); - } - // Accumulate Gij*xj to gradf - Vector x0_j = x0.at(*xj); - gradf_xi += Gij * x0_j; - } - // Subtract the linear term gi - gradf_xi += -factor->linearTerm(xi); - } - - //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - // Compute the Jacobian A for the dual factor Ax-b - // Obtain the jacobians for lambda variables from their corresponding constraints - // A = gradc_k(xi) = \frac{\partial c_k}{\partial xi}' - vector > lambdaTerms; // collection of lambda_k, and gradc_k - typedef pair FactorIx_SigmaIx; - vector unconstrainedIndex; // pairs of factorIx,sigmaIx of unconstrained rows - BOOST_FOREACH(size_t factorIndex, fullFactorIndices_[xiKey]) { - JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIndex)); - if (!factor || !factor->isConstrained()) - continue; - // Gradient is the transpose of the Jacobian: A_k = gradc_k(xi) = \frac{\partial c_k}{\partial xi}' - // Each column for each lambda_k corresponds to [the transpose of] each constrained row factor - Matrix A_k = factor->getA(factor->find(xiKey)).transpose(); - if (debug) - gtsam::print(A_k, "A_k = "); - - // Deal with mixed sigmas: no information if sigma != 0 - Vector sigmas = factor->get_model()->sigmas(); - for (size_t sigmaIx = 0; sigmaIx < sigmas.size(); ++sigmaIx) { - // if it's either inequality (sigma<0) or unconstrained (sigma>0) - // we have no information about it - if (fabs(sigmas[sigmaIx]) > 1e-9) { - A_k.col(sigmaIx) = zero(A_k.rows()); - // remember to add a zero prior on this lambda, otherwise the graph is under-determined - unconstrainedIndex.push_back(make_pair(factorIndex, sigmaIx)); - } - } - - // Use factorIndex as the lambda's key. - lambdaTerms.push_back(make_pair(factorIndex, A_k)); - } - - //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - // Create and add factors to the dual graph - // If least square approximation is desired, use unit noise model. - if (debug) - cout << "Create dual factor" << endl; - if (useLeastSquare) { - if (debug) - cout << "use least square!" << endl; - dualGraph.push_back( - JacobianFactor(lambdaTerms, gradf_xi, - noiseModel::Unit::Create(gradf_xi.size()))); - } else { - // Enforce constrained noise model so lambdas are solved with QR - // and should exactly satisfy all the equations - if (debug) - cout << gradf_xi << endl; - dualGraph.push_back( - JacobianFactor(lambdaTerms, gradf_xi, - noiseModel::Constrained::All(gradf_xi.size()))); - } - - // Add 0 priors on all lambdas of the unconstrained rows to make sure the graph is solvable - if (debug) - cout << "Create priors" << endl; - BOOST_FOREACH(FactorIx_SigmaIx factorIx_sigmaIx, unconstrainedIndex) { - size_t factorIx = factorIx_sigmaIx.first; - JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIx)); - size_t dim = factor->get_model()->dim(); - Matrix J = zeros(dim, dim); - size_t sigmaIx = factorIx_sigmaIx.second; - J(sigmaIx, sigmaIx) = 1.0; - // Use factorIndex as the lambda's key. - if (debug) - cout << "prior for factor " << factorIx << endl; - dualGraph.push_back(JacobianFactor(factorIx, J, zero(dim))); - } +GaussianFactorGraph::shared_ptr QPSolver::buildDualGraph( + const LinearInequalityFactorGraph& workingSet, const VectorValues& delta) const { + GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph()); + BOOST_FOREACH(Key key, constrainedKeys_) { + // Each constrained key becomes a factor in the dual graph + dualGraph->push_back(createDualFactor(key, workingSet, delta)); } - return dualGraph; } //****************************************************************************** pair QPSolver::identifyLeavingConstraint( + const LinearInequalityFactorGraph& workingSet, const VectorValues& lambdas) const { int worstFactorIx = -1, worstSigmaIx = -1; // preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either // inactive or a good inequality constraint, so we don't care! double maxLambda = 0.0; - BOOST_FOREACH(size_t factorIx, constraintIndices_) { - Vector lambda = lambdas.at(factorIx); - Vector orgSigmas = toJacobian(graph_.at(factorIx))->get_model()->sigmas(); - for (size_t j = 0; j < orgSigmas.size(); ++j) - // If it is a BAD active inequality, and lambda is larger than the current max - if (orgSigmas[j] < 0 && lambda[j] > maxLambda) { + for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) { + const LinearInequality::shared_ptr& factor = workingSet.at(factorIx); + Vector lambda = lambdas.at(factor->dualKey()); + Vector sigmas = factor->get_model()->sigmas(); + for (size_t j = 0; j < sigmas.size(); ++j) + // If it is an active constraint, and lambda is larger than the current max + if (sigmas[j] == ACTIVE && lambda[j] > maxLambda) { worstFactorIx = factorIx; worstSigmaIx = j; maxLambda = lambda[j]; @@ -264,14 +94,16 @@ pair QPSolver::identifyLeavingConstraint( } //****************************************************************************** -bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph, - int factorIx, int sigmaIx, double newSigma) const { +LinearInequalityFactorGraph QPSolver::updateWorkingSet( + const LinearInequalityFactorGraph& workingSet, int factorIx, int sigmaIx, + double state) const { + LinearInequalityFactorGraph newWorkingSet = workingSet; if (factorIx < 0 || sigmaIx < 0) - return false; - Vector sigmas = toJacobian(workingGraph.at(factorIx))->get_model()->sigmas(); - sigmas[sigmaIx] = newSigma; // removing it from the working set - toJacobian(workingGraph.at(factorIx))->setModel(true, sigmas); - return true; + return newWorkingSet; + Vector sigmas = newWorkingSet.at(factorIx)->get_model()->sigmas(); + sigmas[sigmaIx] = state; + newWorkingSet.at(factorIx)->setModel(true, sigmas); + return newWorkingSet; } //****************************************************************************** @@ -291,44 +123,28 @@ bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph, * We want the minimum of all those alphas among all inactive inequality. */ boost::tuple QPSolver::computeStepSize( - const GaussianFactorGraph& workingGraph, const VectorValues& xk, + const LinearInequalityFactorGraph& workingSet, const VectorValues& xk, const VectorValues& p) const { static bool debug = false; double minAlpha = 1.0; int closestFactorIx = -1, closestSigmaIx = -1; - BOOST_FOREACH(size_t factorIx, constraintIndices_) { - JacobianFactor::shared_ptr jacobian = toJacobian(workingGraph.at(factorIx)); - Vector sigmas = jacobian->get_model()->sigmas(); - Vector b = jacobian->getb(); + for(size_t factorIx = 0; factorIxget_model()->sigmas(); + Vector b = factor->getb(); for (size_t s = 0; s < sigmas.size(); ++s) { // If it is an inactive inequality, compute alpha and update min - if (sigmas[s] < 0) { + if (sigmas[s] == INACTIVE) { // Compute aj'*p - double ajTp = 0.0; - for (Factor::const_iterator xj = jacobian->begin(); - xj != jacobian->end(); ++xj) { - Vector pj = p.at(*xj); - Vector aj = jacobian->getA(xj).row(s); - ajTp += aj.dot(pj); - } - if (debug) - cout << "s, ajTp, b[s]: " << s << " " << ajTp << " " << b[s] << endl; + double ajTp = factor->dotProductRow(s, p); // Check if aj'*p >0. Don't care if it's not. if (ajTp <= 0) continue; // Compute aj'*xk - double ajTx = 0.0; - for (Factor::const_iterator xj = jacobian->begin(); - xj != jacobian->end(); ++xj) { - Vector xkj = xk.at(*xj); - Vector aj = jacobian->getA(xj).row(s); - ajTx += aj.dot(xkj); - } - if (debug) - cout << "b[s], ajTx: " << b[s] << " " << ajTx << " " << ajTp << endl; + double ajTx = factor->dotProductRow(s, xk); // alpha = (bj - aj'*xk) / (aj'*p) double alpha = (b[s] - ajTx) / ajTp; @@ -348,171 +164,224 @@ boost::tuple QPSolver::computeStepSize( } //****************************************************************************** -bool QPSolver::iterateInPlace(GaussianFactorGraph& workingGraph, - VectorValues& currentSolution, VectorValues& lambdas) const { +QPState QPSolver::iterate(const QPState& state) const { static bool debug = false; + + // Solve with the current working set + VectorValues newValues = solveWithCurrentWorkingSet(state.workingSet); if (debug) - workingGraph.print("workingGraph: "); - // Obtain the solution from the current working graph - VectorValues newSolution = workingGraph.optimize(); - if (debug) - newSolution.print("New solution:"); + newValues.print("New solution:"); // If we CAN'T move further - if (newSolution.equals(currentSolution, 1e-5)) { + if (newValues.equals(state.values, 1e-5)) { // Compute lambda from the dual graph if (debug) cout << "Building dual graph..." << endl; - GaussianFactorGraph dualGraph = buildDualGraph(workingGraph, newSolution); + GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet, newValues); if (debug) - dualGraph.print("Dual graph: "); - lambdas = dualGraph.optimize(); + dualGraph->print("Dual graph: "); + VectorValues duals = dualGraph->optimize(); if (debug) - lambdas.print("lambdas :"); + duals.print("Duals :"); - int factorIx, sigmaIx; - boost::tie(factorIx, sigmaIx) = identifyLeavingConstraint(lambdas); + int leavingFactor, leavingSigmaIx; + boost::tie(leavingFactor, leavingSigmaIx) = // + identifyLeavingConstraint(state.workingSet, duals); if (debug) - cout << "violated active inequality - factorIx, sigmaIx: " << factorIx - << " " << sigmaIx << endl; + cout << "violated active inequality - factorIx, sigmaIx: " << leavingFactor + << " " << leavingSigmaIx << endl; - // Try to de-activate the weakest violated inequality constraints - // if not successful, i.e. all inequality constraints are satisfied: - // We have the solution!! - if (!updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, -1.0)) - return true; - } else { + // If all inequality constraints are satisfied: We have the solution!! + if (leavingFactor < 0 || leavingSigmaIx < 0) { + return QPState(newValues, duals, state.workingSet, true); + } + else { + // Inactivate the leaving constraint + LinearInequalityFactorGraph newWorkingSet = updateWorkingSet( + state.workingSet, leavingFactor, leavingSigmaIx, INACTIVE); + return QPState(newValues, duals, newWorkingSet, false); + } + } + else { // If we CAN make some progress // Adapt stepsize if some inactive constraints complain about this move - if (debug) - cout << "Computing stepsize..." << endl; double alpha; int factorIx, sigmaIx; - VectorValues p = newSolution - currentSolution; + VectorValues p = newValues - state.values; boost::tie(alpha, factorIx, sigmaIx) = // - computeStepSize(workingGraph, currentSolution, p); + computeStepSize(state.workingSet, state.values, p); if (debug) cout << "alpha, factorIx, sigmaIx: " << alpha << " " << factorIx << " " << sigmaIx << endl; // also add to the working set the one that complains the most - updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, 0.0); - // step! - currentSolution = currentSolution + alpha * p; -// if (alpha <1e-5) { -// if (debug) cout << "Building dual graph..." << endl; -// GaussianFactorGraph dualGraph = buildDualGraph(workingGraph, newSolution); -// if (debug) dualGraph.print("Dual graph: "); -// lambdas = dualGraph.optimize(); -// if (debug) lambdas.print("lambdas :"); -// return true; // TODO: HACK HACK!!! -// } - } + LinearInequalityFactorGraph newWorkingSet = // + updateWorkingSet(state.workingSet, factorIx, sigmaIx, ACTIVE); - return false; + // step! + newValues = state.values + alpha * p; + + return QPState(newValues, state.duals, newWorkingSet, false); + } } //****************************************************************************** pair QPSolver::optimize( const VectorValues& initialValues) const { - GaussianFactorGraph workingGraph = graph_.clone(); - VectorValues currentSolution = initialValues; - VectorValues lambdas; - bool converged = false; - while (!converged) { - converged = iterateInPlace(workingGraph, currentSolution, lambdas); + + // TODO: initialize workingSet from the feasible initialValues + LinearInequalityFactorGraph workingSet(qp_.inequalities); + + QPState state(initialValues, VectorValues(), workingSet, false); + + /// main loop of the solver + while (!state.converged) { + state = iterate(state); } - return make_pair(currentSolution, lambdas); + + return make_pair(state.values, state.duals); } //****************************************************************************** -pair QPSolver::initialValuesLP() const { - size_t firstSlackKey = 0; - BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) { - if (firstSlackKey < key) - firstSlackKey = key; - } +std::pair QPSolver::maxKey(const FastSet& keys) const { + KeySet::iterator maxEl = std::max_element(keys.begin(), keys.end()); + if (maxEl==keys.end()) + return make_pair(false, 0); + return make_pair(true, *maxEl); +} + +//****************************************************************************** +boost::tuple QPSolver::initialValuesLP() const { + // Key for the first slack variable = maximum key + 1 + size_t firstSlackKey; + bool found; + KeySet allKeys = qp_.cost.keys(); + allKeys.merge(qp_.equalities.keys()); + allKeys.merge(qp_.inequalities.keys()); + boost::tie(found, firstSlackKey) = maxKey(allKeys); firstSlackKey += 1; VectorValues initialValues; // Create zero values for constrained vars - BOOST_FOREACH(size_t iFactor, constraintIndices_) { - JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor)); - KeyVector keys = jacobian->keys(); + BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) { + KeyVector keys = factor->keys(); BOOST_FOREACH(Key key, keys) { if (!initialValues.exists(key)) { - size_t dim = jacobian->getDim(jacobian->find(key)); + size_t dim = factor->getDim(factor->find(key)); + initialValues.insert(key, zero(dim)); + } + } + } + + BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) { + KeyVector keys = factor->keys(); + BOOST_FOREACH(Key key, keys) { + if (!initialValues.exists(key)) { + size_t dim = factor->getDim(factor->find(key)); initialValues.insert(key, zero(dim)); } } } // Insert initial values for slack variables - size_t slackKey = firstSlackKey; - BOOST_FOREACH(size_t iFactor, constraintIndices_) { - JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor)); - Vector errorAtZero = jacobian->getb(); - Vector slackInit = zero(errorAtZero.size()); - Vector sigmas = jacobian->get_model()->sigmas(); - for (size_t i = 0; i < sigmas.size(); ++i) { - if (sigmas[i] < 0) { - slackInit[i] = std::max(errorAtZero[i], 0.0); - } else if (sigmas[i] == 0.0) { - errorAtZero[i] = fabs(errorAtZero[i]); - } // if it has >0 sigma, i.e. normal Gaussian noise, initialize it at 0 - } + Key slackKey = firstSlackKey; + // Equality: zi = |bi| + BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) { + Vector errorAtZero = factor->getb(); + Vector slackInit = errorAtZero.cwiseAbs(); initialValues.insert(slackKey, slackInit); slackKey++; } - return make_pair(initialValues, firstSlackKey); + // Inequality: zi = max(bi, 0) + BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) { + Vector errorAtZero = factor->getb(); + Vector zeroVec = zero(errorAtZero.size()); + Vector slackInit = errorAtZero.cwiseMax(zeroVec); + initialValues.insert(slackKey, slackInit); + slackKey++; + } + + return boost::make_tuple(initialValues, firstSlackKey, slackKey - 1); } //****************************************************************************** VectorValues QPSolver::objectiveCoeffsLP(Key firstSlackKey) const { VectorValues slackObjective; - for (size_t i = 0; i < constraintIndices_.size(); ++i) { - Key key = firstSlackKey + i; - size_t iFactor = constraintIndices_[i]; - JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor)); - size_t dim = jacobian->rows(); - Vector objective = ones(dim); - /* We should not ignore unconstrained slack var dimensions (those rows with sigmas >0) - * because their values might be underdetermined in the LP. Since they will have only - * 1 constraint zi>=0, enforcing them in the min obj function won't harm the other constrained - * slack vars, but also makes them well defined: 0 at the minimum. - */ - slackObjective.insert(key, ones(dim)); + + Key slackKey = firstSlackKey; + // Equalities + BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) { + size_t dim = factor->rows(); + slackObjective.insert(slackKey, ones(dim)); + slackKey++; } + + // Inequalities + BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) { + size_t dim = factor->rows(); + slackObjective.insert(slackKey, ones(dim)); + slackKey++; + } + return slackObjective; } //****************************************************************************** -pair QPSolver::constraintsLP( +boost::tuple QPSolver::constraintsLP( Key firstSlackKey) const { - // Create constraints and 0 lower bounds (zi>=0) - GaussianFactorGraph::shared_ptr constraints(new GaussianFactorGraph()); + // Create constraints and zero lower bounds (zi>=0) + LinearEqualityFactorGraph::shared_ptr equalities(new LinearEqualityFactorGraph()); + LinearInequalityFactorGraph::shared_ptr inequalities(new LinearInequalityFactorGraph()); VectorValues slackLowerBounds; - for (size_t key = firstSlackKey; - key < firstSlackKey + constraintIndices_.size(); ++key) { - size_t iFactor = constraintIndices_[key - firstSlackKey]; - JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor)); + + Key slackKey = firstSlackKey; + + // Equalities + BOOST_FOREACH(const LinearEquality::shared_ptr& factor, qp_.equalities) { // Collect old terms to form a new factor // TODO: it might be faster if we can get the whole block matrix at once // but I don't know how to extend the current VerticalBlockMatrix vector > terms; - for (Factor::iterator it = jacobian->begin(); it != jacobian->end(); ++it) { - terms.push_back(make_pair(*it, jacobian->getA(it))); + for (Factor::iterator it = factor->begin(); it != factor->end(); ++it) { + terms.push_back(make_pair(*it, factor->getA(it))); } + + Vector b = factor->getb(); + Vector sign_b = b.cwiseQuotient(b.cwiseAbs()); + terms.push_back(make_pair(slackKey, sign_b)); + equalities->push_back(LinearEquality(terms, b, factor->dualKey())); + + // Add lower bound for this slack key + slackLowerBounds.insert(slackKey, zero(b.rows())); + // Increase slackKey for the next slack variable + slackKey++; + } + + // Inequalities + BOOST_FOREACH(const LinearInequality::shared_ptr& factor, qp_.inequalities) { + // Collect old terms to form a new factor + // TODO: it might be faster if we can get the whole block matrix at once + // but I don't know how to extend the current VerticalBlockMatrix + vector > terms; + for (Factor::iterator it = factor->begin(); it != factor->end(); ++it) { + terms.push_back(make_pair(*it, factor->getA(it))); + } + // Add the slack term to the constraint // Unlike Nocedal06book, pg.473, we want ax-z <= b, since we always assume - // LE constraints ax <= b for sigma < 0. - size_t dim = jacobian->rows(); - terms.push_back(make_pair(key, -eye(dim))); - constraints->push_back( - JacobianFactor(terms, jacobian->getb(), jacobian->get_model())); + // LE constraints ax <= b. + size_t dim = factor->rows(); + terms.push_back(make_pair(slackKey, -eye(dim))); + inequalities->push_back(LinearInequality(terms, factor->getb(), + factor->dualKey())); + // Add lower bound for this slack key - slackLowerBounds.insert(key, zero(dim)); + slackLowerBounds.insert(slackKey, zero(dim)); + // Increase slackKey for the next slack variable + slackKey++; } - return make_pair(constraints, slackLowerBounds); + + return boost::make_tuple(equalities, inequalities, slackLowerBounds); } //****************************************************************************** @@ -520,19 +389,20 @@ pair QPSolver::findFeasibleInitialValues() const { static const bool debug = false; // Initial values with slack variables for the LP subproblem, Nocedal06book, pg.473 VectorValues initialValues; - size_t firstSlackKey; - boost::tie(initialValues, firstSlackKey) = initialValuesLP(); + size_t firstSlackKey, lastSlackKey; + boost::tie(initialValues, firstSlackKey, lastSlackKey) = initialValuesLP(); // Coefficients for the LP subproblem objective function, min \sum_i z_i VectorValues objectiveLP = objectiveCoeffsLP(firstSlackKey); // Create constraints and lower bounds of slack variables - GaussianFactorGraph::shared_ptr constraints; + LinearEqualityFactorGraph::shared_ptr equalities; + LinearInequalityFactorGraph::shared_ptr inequalities; VectorValues slackLowerBounds; - boost::tie(constraints, slackLowerBounds) = constraintsLP(firstSlackKey); + boost::tie(equalities, inequalities, slackLowerBounds) = constraintsLP(firstSlackKey); // Solve the LP subproblem - LPSolver lpSolver(objectiveLP, constraints, slackLowerBounds); + LPSolver lpSolver(objectiveLP, equalities, inequalities, slackLowerBounds); VectorValues solution = lpSolver.solve(); if (debug) @@ -540,29 +410,34 @@ pair QPSolver::findFeasibleInitialValues() const { if (debug) objectiveLP.print("Objective LP: "); if (debug) - constraints->print("Constraints LP: "); + equalities->print("Equalities LP: "); + if (debug) + inequalities->print("Inequalities LP: "); if (debug) solution.print("LP solution: "); + // feasible when all slack values are 0s. + double slackSumAbs = 0.0; + for (Key key = firstSlackKey; key <= lastSlackKey; ++key) { + slackSumAbs += solution.at(key).cwiseAbs().sum(); + } + // Remove slack variables from solution - double slackSum = 0.0; - for (Key key = firstSlackKey; key < firstSlackKey + constraintIndices_.size(); - ++key) { - slackSum += solution.at(key).cwiseAbs().sum(); + for (Key key = firstSlackKey; key <= lastSlackKey; ++key) { solution.erase(key); } // Insert zero vectors for free variables that are not in the constraints - BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) { + BOOST_FOREACH(Key key, costVariableIndex_ | boost::adaptors::map_keys) { if (!solution.exists(key)) { - GaussianFactor::shared_ptr factor = graph_.at( - *fullFactorIndices_[key].begin()); + GaussianFactor::shared_ptr factor = qp_.cost.at( + *costVariableIndex_[key].begin()); size_t dim = factor->getDim(factor->find(key)); solution.insert(key, zero(dim)); } } - return make_pair(slackSum < 1e-5, solution); + return make_pair(slackSumAbs < 1e-5, solution); } //****************************************************************************** diff --git a/gtsam_unstable/linear/QPSolver.h b/gtsam_unstable/linear/QPSolver.h index ea4c18fa9..b3c66a21f 100644 --- a/gtsam_unstable/linear/QPSolver.h +++ b/gtsam_unstable/linear/QPSolver.h @@ -7,14 +7,34 @@ #pragma once -#include #include +#include #include #include namespace gtsam { +/// This struct holds the state of QPSolver at each iteration +struct QPState { + VectorValues values; + VectorValues duals; + LinearInequalityFactorGraph workingSet; + bool converged; + + /// default constructor + QPState() : + values(), duals(), workingSet(), converged(false) { + } + + /// constructor with initial values + QPState(const VectorValues& initialValues, const VectorValues& initialDuals, + const LinearInequalityFactorGraph& initialWorkingSet, bool _converged) : + values(initialValues), duals(initialDuals), workingSet(initialWorkingSet), converged( + _converged) { + } +}; + /** * This class implements the active set method to solve quadratic programming problems * encoded in a GaussianFactorGraph with special mixed constrained noise models, in which @@ -24,32 +44,41 @@ namespace gtsam { */ class QPSolver { - class Hessians: public FactorGraph { - }; - - const GaussianFactorGraph& graph_; //!< the original graph, can't be modified! - FastVector constraintIndices_; //!< Indices of constrained factors in the original graph - Hessians freeHessians_; //!< unconstrained Hessians of constrained variables - VariableIndex freeHessianFactorIndex_; //!< indices of unconstrained Hessian factors of constrained variables - // gtsam calls it "VariableIndex", but I think FactorIndex - // makes more sense, because it really stores factor indices. - VariableIndex fullFactorIndices_; //!< indices of factors involving each variable. - // gtsam calls it "VariableIndex", but I think FactorIndex - // makes more sense, because it really stores factor indices. + const QP& qp_; //!< factor graphs of the QP problem, can't be modified! + GaussianFactorGraph baseGraph_; //!< factor graphs of cost factors and linear equalities. The working set of inequalities will be added to this base graph in the process. + VariableIndex costVariableIndex_, equalityVariableIndex_, + inequalityVariableIndex_; + FastSet constrainedKeys_; //!< all constrained keys, will become factors in the dual graph public: /// Constructor - QPSolver(const GaussianFactorGraph& graph); + QPSolver(const QP& qp); - /// Return indices of all constrained factors - FastVector constraintIndices() const { - return constraintIndices_; + /// Find solution with the current working set + VectorValues solveWithCurrentWorkingSet( + const LinearInequalityFactorGraph& workingSet) const; + + /// @name Build the dual graph + /// @{ + + /// Collect the Jacobian terms for a dual factor + template + std::vector > collectDualJacobians(Key key, + const FactorGraph& graph, + const VariableIndex& variableIndex) const { + std::vector > Aterms; + BOOST_FOREACH(size_t factorIx, variableIndex[key]){ + typename FACTOR::shared_ptr factor = graph.at(factorIx); + Matrix Ai = factor->getA(factor->find(key)).transpose(); + Aterms.push_back(std::make_pair(factor->dualKey(), Ai)); + } + return Aterms; } - /// Return the Hessian factor graph of constrained variables - const Hessians& freeHessiansOfConstrainedVars() const { - return freeHessians_; - } + /// Create a dual factor + JacobianFactor::shared_ptr createDualFactor(Key key, + const LinearInequalityFactorGraph& workingSet, + const VectorValues& delta) const; /** * Build the dual graph to solve for the Lagrange multipliers. @@ -78,8 +107,11 @@ public: * - The constant term b is \grad f(xi), which can be computed from all unconstrained * Hessian factors connecting to xi: \grad f(xi) = \sum_j G_ij*xj - gi */ - GaussianFactorGraph buildDualGraph(const GaussianFactorGraph& graph, - const VectorValues& x0, bool useLeastSquare = false) const; + GaussianFactorGraph::shared_ptr buildDualGraph( + const LinearInequalityFactorGraph& workingSet, + const VectorValues& delta) const; + + /// @} /** * The goal of this function is to find currently active inequality constraints @@ -116,15 +148,15 @@ public: * */ std::pair identifyLeavingConstraint( + const LinearInequalityFactorGraph& workingSet, const VectorValues& lambdas) const; /** - * Deactivate or activate an inequality constraint in place - * Warning: modify in-place to avoid copy/clone - * @return true if update successful + * Inactivate or activate an inequality constraint */ - bool updateWorkingSetInplace(GaussianFactorGraph& workingGraph, int factorIx, - int sigmaIx, double newSigma) const; + LinearInequalityFactorGraph updateWorkingSet( + const LinearInequalityFactorGraph& workingSet, int factorIx, int sigmaIx, + double state) const; /** * Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1] @@ -135,12 +167,11 @@ public: * in the next iteration */ boost::tuple computeStepSize( - const GaussianFactorGraph& workingGraph, const VectorValues& xk, + const LinearInequalityFactorGraph& workingSet, const VectorValues& xk, const VectorValues& p) const; - /** Iterate 1 step, modify workingGraph and currentSolution *IN PLACE* !!! */ - bool iterateInPlace(GaussianFactorGraph& workingGraph, - VectorValues& currentSolution, VectorValues& lambdas) const; + /** Iterate 1 step, return a new state with a new workingSet and values */ + QPState iterate(const QPState& state) const; /** Optimize with a provided initial values * For this version, it is the responsibility of the caller to provide @@ -161,27 +192,27 @@ public: std::pair optimize() const; /** - * Create initial values for the LP subproblem - * @return initial values and the key for the first slack variable + * find the max key */ - std::pair initialValuesLP() const; + std::pair maxKey(const FastSet& keys) const; + + /** + * Create initial values for the LP subproblem + * @return initial values and the key for the first and last slack variables + */ + boost::tuple initialValuesLP() const; /// Create coefficients for the LP subproblem's objective function as the sum of slack var VectorValues objectiveCoeffsLP(Key firstSlackKey) const; /// Build constraints and slacks' lower bounds for the LP subproblem - std::pair constraintsLP( + boost::tuple constraintsLP( Key firstSlackKey) const; /// Find a feasible initial point std::pair findFeasibleInitialValues() const; -private: - - /// Collect all free Hessians involving constrained variables into a graph - void findUnconstrainedHessiansOfConstrainedVars( - const std::set& constrainedVars); - }; } /* namespace gtsam */ diff --git a/gtsam_unstable/linear/tests/testQPSolver.cpp b/gtsam_unstable/linear/tests/testQPSolver.cpp index 6d8e07901..edf64655d 100644 --- a/gtsam_unstable/linear/tests/testQPSolver.cpp +++ b/gtsam_unstable/linear/tests/testQPSolver.cpp @@ -31,14 +31,14 @@ using namespace gtsam::symbol_shorthand; /* ************************************************************************* */ // Create test graph according to Forst10book_pg171Ex5 -GaussianFactorGraph createTestCase() { - GaussianFactorGraph graph; +QP createTestCase() { + QP qp; // Objective functions x1^2 - x1*x2 + x2^2 - 3*x1 + 5 // Note the Hessian encodes: // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10 - graph.push_back( + qp.cost.push_back( HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1), 2.0 * ones(1, 1), zero(1), 10.0)); @@ -48,12 +48,9 @@ GaussianFactorGraph createTestCase() { Matrix A1 = (Matrix(4, 1) << 1, -1, 0, 1); Matrix A2 = (Matrix(4, 1) << 1, 0, -1, 0); Vector b = (Vector(4) << 2, 0, 0, 1.5); - // Special constrained noise model denoting <= inequalities with negative sigmas - noiseModel::Constrained::shared_ptr noise = - noiseModel::Constrained::MixedSigmas((Vector(4) << -1, -1, -1, -1)); - graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise)); + qp.inequalities.push_back(LinearInequality(X(1), A1, X(2), A2, b, 0)); - return graph; + return qp; } TEST(QPSolver, TestCase) { @@ -61,49 +58,43 @@ TEST(QPSolver, TestCase) { double x1 = 5, x2 = 7; values.insert(X(1), x1 * ones(1, 1)); values.insert(X(2), x2 * ones(1, 1)); - GaussianFactorGraph graph = createTestCase(); - DOUBLES_EQUAL(29, x1*x1 - x1*x2 + x2*x2 - 3*x1 + 5, 1e-9); - DOUBLES_EQUAL(29, graph[0]->error(values), 1e-9); + QP qp = createTestCase(); + DOUBLES_EQUAL(29, x1 * x1 - x1 * x2 + x2 * x2 - 3 * x1 + 5, 1e-9); + DOUBLES_EQUAL(29, qp.cost[0]->error(values), 1e-9); } TEST(QPSolver, constraintsAux) { - GaussianFactorGraph graph = createTestCase(); + QP qp = createTestCase(); - QPSolver solver(graph); - FastVector constraintIx = solver.constraintIndices(); - LONGS_EQUAL(1, constraintIx.size()); - LONGS_EQUAL(1, constraintIx[0]); + QPSolver solver(qp); VectorValues lambdas; - lambdas.insert(constraintIx[0], (Vector(4) << -0.5, 0.0, 0.3, 0.1)); + lambdas.insert(0, (Vector(4) << -0.5, 0.0, 0.3, 0.1)); int factorIx, lambdaIx; - boost::tie(factorIx, lambdaIx) = solver.identifyLeavingConstraint(lambdas); + boost::tie(factorIx, lambdaIx) = solver.identifyLeavingConstraint( + qp.inequalities, lambdas); LONGS_EQUAL(1, factorIx); LONGS_EQUAL(2, lambdaIx); VectorValues lambdas2; - lambdas2.insert(constraintIx[0], (Vector(4) << -0.5, 0.0, -0.3, -0.1)); + lambdas2.insert(0, (Vector(4) << -0.5, 0.0, -0.3, -0.1)); int factorIx2, lambdaIx2; boost::tie(factorIx2, lambdaIx2) = solver.identifyLeavingConstraint( - lambdas2); + qp.inequalities, lambdas2); LONGS_EQUAL(-1, factorIx2); LONGS_EQUAL(-1, lambdaIx2); - - HessianFactor expectedFreeHessian(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), - 3.0 * ones(1), 2.0 * ones(1, 1), zero(1), 1.0); - EXPECT(solver.freeHessiansOfConstrainedVars()[0]->equals(expectedFreeHessian)); } /* ************************************************************************* */ // Create a simple test graph with one equality constraint -GaussianFactorGraph createEqualityConstrainedTest() { - GaussianFactorGraph graph; +QP createEqualityConstrainedTest() { + QP qp; // Objective functions x1^2 + x2^2 // Note the Hessian encodes: // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0 - graph.push_back( + qp.cost.push_back( HessianFactor(X(1), X(2), 2.0 * ones(1, 1), zeros(1, 1), zero(1), 2.0 * ones(1, 1), zero(1), 0.0)); @@ -112,26 +103,24 @@ GaussianFactorGraph createEqualityConstrainedTest() { Matrix A1 = (Matrix(1, 1) << 1); Matrix A2 = (Matrix(1, 1) << 1); Vector b = -(Vector(1) << 1); - // Special constrained noise model denoting <= inequalities with negative sigmas - noiseModel::Constrained::shared_ptr noise = - noiseModel::Constrained::MixedSigmas((Vector(1) << 0.0)); - graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise)); + qp.equalities.push_back(LinearEquality(X(1), A1, X(2), A2, b, 0)); - return graph; + return qp; } TEST(QPSolver, dual) { - GaussianFactorGraph graph = createEqualityConstrainedTest(); + QP qp = createEqualityConstrainedTest(); // Initials values VectorValues initialValues; initialValues.insert(X(1), ones(1)); initialValues.insert(X(2), ones(1)); - QPSolver solver(graph); + QPSolver solver(qp); - GaussianFactorGraph dualGraph = solver.buildDualGraph(graph, initialValues); - VectorValues dual = dualGraph.optimize(); + GaussianFactorGraph::shared_ptr dualGraph = solver.buildDualGraph( + qp.inequalities, initialValues); + VectorValues dual = dualGraph->optimize(); VectorValues expectedDual; expectedDual.insert(1, (Vector(1) << 2.0)); CHECK(assert_equal(expectedDual, dual, 1e-10)); @@ -139,39 +128,36 @@ TEST(QPSolver, dual) { /* ************************************************************************* */ -TEST(QPSolver, iterate) { - GaussianFactorGraph graph = createTestCase(); - QPSolver solver(graph); - - GaussianFactorGraph workingGraph = graph.clone(); - - VectorValues currentSolution; - currentSolution.insert(X(1), zero(1)); - currentSolution.insert(X(2), zero(1)); - - std::vector expectedSolutions(3); - expectedSolutions[0].insert(X(1), (Vector(1) << 4.0 / 3.0)); - expectedSolutions[0].insert(X(2), (Vector(1) << 2.0 / 3.0)); - expectedSolutions[1].insert(X(1), (Vector(1) << 1.5)); - expectedSolutions[1].insert(X(2), (Vector(1) << 0.5)); - expectedSolutions[2].insert(X(1), (Vector(1) << 1.5)); - expectedSolutions[2].insert(X(2), (Vector(1) << 0.5)); - - bool converged = false; - int it = 0; - while (!converged) { - VectorValues lambdas; - converged = solver.iterateInPlace(workingGraph, currentSolution, lambdas); - CHECK(assert_equal(expectedSolutions[it], currentSolution, 1e-100)); - it++; - } -} - +//TEST(QPSolver, iterate) { +// QP qp = createTestCase(); +// QPSolver solver(qp); +// +// VectorValues currentSolution; +// currentSolution.insert(X(1), zero(1)); +// currentSolution.insert(X(2), zero(1)); +// +// std::vector expectedSolutions(3); +// expectedSolutions[0].insert(X(1), (Vector(1) << 4.0 / 3.0)); +// expectedSolutions[0].insert(X(2), (Vector(1) << 2.0 / 3.0)); +// expectedSolutions[1].insert(X(1), (Vector(1) << 1.5)); +// expectedSolutions[1].insert(X(2), (Vector(1) << 0.5)); +// expectedSolutions[2].insert(X(1), (Vector(1) << 1.5)); +// expectedSolutions[2].insert(X(2), (Vector(1) << 0.5)); +// +// bool converged = false; +// int it = 0; +// while (!converged) { +// VectorValues lambdas; +// converged = solver.iterateInPlace(workingGraph, currentSolution, lambdas); +// CHECK(assert_equal(expectedSolutions[it], currentSolution, 1e-100)); +// it++; +// } +//} /* ************************************************************************* */ TEST(QPSolver, optimizeForst10book_pg171Ex5) { - GaussianFactorGraph graph = createTestCase(); - QPSolver solver(graph); + QP qp = createTestCase(); + QPSolver solver(qp); VectorValues initialValues; initialValues.insert(X(1), zero(1)); initialValues.insert(X(2), zero(1)); @@ -185,14 +171,14 @@ TEST(QPSolver, optimizeForst10book_pg171Ex5) { /* ************************************************************************* */ // Create Matlab's test graph as in http://www.mathworks.com/help/optim/ug/quadprog.html -GaussianFactorGraph createTestMatlabQPEx() { - GaussianFactorGraph graph; +QP createTestMatlabQPEx() { + QP qp; // Objective functions 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 -6*x2 // Note the Hessian encodes: // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0 - graph.push_back( + qp.cost.push_back( HessianFactor(X(1), X(2), 1.0 * ones(1, 1), -ones(1, 1), 2.0 * ones(1), 2.0 * ones(1, 1), 6 * ones(1), 1000.0)); @@ -202,17 +188,14 @@ GaussianFactorGraph createTestMatlabQPEx() { Matrix A1 = (Matrix(5, 1) << 1, -1, 2, -1, 0); Matrix A2 = (Matrix(5, 1) << 1, 2, 1, 0, -1); Vector b = (Vector(5) << 2, 2, 3, 0, 0); - // Special constrained noise model denoting <= inequalities with negative sigmas - noiseModel::Constrained::shared_ptr noise = - noiseModel::Constrained::MixedSigmas((Vector(5) << -1, -1, -1, -1, -1)); - graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise)); + qp.inequalities.push_back(LinearInequality(X(1), A1, X(2), A2, b, 0)); - return graph; + return qp; } TEST(QPSolver, optimizeMatlabEx) { - GaussianFactorGraph graph = createTestMatlabQPEx(); - QPSolver solver(graph); + QP qp = createTestMatlabQPEx(); + QPSolver solver(qp); VectorValues initialValues; initialValues.insert(X(1), zero(1)); initialValues.insert(X(2), zero(1)); @@ -226,33 +209,30 @@ TEST(QPSolver, optimizeMatlabEx) { /* ************************************************************************* */ // Create test graph as in Nocedal06book, Ex 16.4, pg. 475 -GaussianFactorGraph createTestNocedal06bookEx16_4() { - GaussianFactorGraph graph; +QP createTestNocedal06bookEx16_4() { + QP qp; - graph.push_back(JacobianFactor(X(1), ones(1, 1), ones(1))); - graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1))); + qp.cost.push_back(JacobianFactor(X(1), ones(1, 1), ones(1))); + qp.cost.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1))); // Inequality constraints - noiseModel::Constrained::shared_ptr noise = - noiseModel::Constrained::MixedSigmas((Vector(1) << -1)); - graph.push_back( - JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), 2 * ones(1), - noise)); - graph.push_back( - JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), - noise)); - graph.push_back( - JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1), - noise)); - graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise)); - graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise)); + qp.inequalities.push_back( + LinearInequality(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), 2 * ones(1), + 0)); + qp.inequalities.push_back( + LinearInequality(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), 1)); + qp.inequalities.push_back( + LinearInequality(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1), + 2)); + qp.inequalities.push_back(LinearInequality(X(1), -ones(1, 1), zero(1), 3)); + qp.inequalities.push_back(LinearInequality(X(2), -ones(1, 1), zero(1), 4)); - return graph; + return qp; } TEST(QPSolver, optimizeNocedal06bookEx16_4) { - GaussianFactorGraph graph = createTestNocedal06bookEx16_4(); - QPSolver solver(graph); + QP qp = createTestNocedal06bookEx16_4(); + QPSolver solver(qp); VectorValues initialValues; initialValues.insert(X(1), (Vector(1) << 2.0)); initialValues.insert(X(2), zero(1)); @@ -288,88 +268,87 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4) { 2.0000 0.5000 */ -GaussianFactorGraph modifyNocedal06bookEx16_4() { - GaussianFactorGraph graph; +QP modifyNocedal06bookEx16_4() { + QP qp; - graph.push_back(JacobianFactor(X(1), ones(1, 1), ones(1))); - graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1))); + qp.cost.push_back(JacobianFactor(X(1), ones(1, 1), ones(1))); + qp.cost.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1))); // Inequality constraints noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::MixedSigmas((Vector(1) << -1)); - graph.push_back( - JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), -1 * ones(1), - noise)); - graph.push_back( - JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), - noise)); - graph.push_back( - JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1), - noise)); - graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise)); - graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise)); + qp.inequalities.push_back( + LinearInequality(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), -1 * ones(1), + 0)); + qp.inequalities.push_back( + LinearInequality(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1), 1)); + qp.inequalities.push_back( + LinearInequality(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1), + 2)); + qp.inequalities.push_back(LinearInequality(X(1), -ones(1, 1), zero(1), 3)); + qp.inequalities.push_back(LinearInequality(X(2), -ones(1, 1), zero(1), 4)); - return graph; + return qp; } TEST(QPSolver, optimizeNocedal06bookEx16_4_findInitialPoint) { - GaussianFactorGraph graph = modifyNocedal06bookEx16_4(); - QPSolver solver(graph); + QP qp = modifyNocedal06bookEx16_4(); + QPSolver solver(qp); VectorValues initialsLP; - Key firstSlackKey; - boost::tie(initialsLP, firstSlackKey) = solver.initialValuesLP(); + Key firstSlackKey, lastSlackKey; + boost::tie(initialsLP, firstSlackKey, lastSlackKey) = solver.initialValuesLP(); EXPECT(assert_equal(zero(1), initialsLP.at(X(1)))); EXPECT(assert_equal(zero(1), initialsLP.at(X(2)))); - LONGS_EQUAL(X(2)+1, firstSlackKey); + LONGS_EQUAL(X(2) + 1, firstSlackKey); EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey))); - EXPECT(assert_equal(ones(1)*6.0, initialsLP.at(firstSlackKey+1))); - EXPECT(assert_equal(ones(1)*2.0, initialsLP.at(firstSlackKey+2))); - EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey+3))); - EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey+4))); + EXPECT(assert_equal(ones(1) * 6.0, initialsLP.at(firstSlackKey + 1))); + EXPECT(assert_equal(ones(1) * 2.0, initialsLP.at(firstSlackKey + 2))); + EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey + 3))); + EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey + 4))); VectorValues objCoeffs = solver.objectiveCoeffsLP(firstSlackKey); for (size_t i = 0; i < 5; ++i) - EXPECT(assert_equal(ones(1), objCoeffs.at(firstSlackKey+i))); + EXPECT(assert_equal(ones(1), objCoeffs.at(firstSlackKey + i))); - GaussianFactorGraph::shared_ptr constraints; + LinearEqualityFactorGraph::shared_ptr equalities; + LinearInequalityFactorGraph::shared_ptr inequalities; VectorValues lowerBounds; - boost::tie(constraints, lowerBounds) = solver.constraintsLP(firstSlackKey); + boost::tie(equalities, inequalities, lowerBounds) = solver.constraintsLP( + firstSlackKey); for (size_t i = 0; i < 5; ++i) - EXPECT(assert_equal(zero(1), lowerBounds.at(firstSlackKey+i))); + EXPECT(assert_equal(zero(1), lowerBounds.at(firstSlackKey + i))); - GaussianFactorGraph expectedConstraints; - noiseModel::Constrained::shared_ptr noise = - noiseModel::Constrained::MixedSigmas((Vector(1) << -1)); - expectedConstraints.push_back( - JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), X(3), -ones(1, 1), - -1 * ones(1), noise)); - expectedConstraints.push_back( - JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), X(4), -ones(1, 1), - 6 * ones(1), noise)); - expectedConstraints.push_back( - JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), X(5), -ones(1, 1), - 2 * ones(1), noise)); - expectedConstraints.push_back( - JacobianFactor(X(1), -ones(1, 1), X(6), -ones(1, 1), zero(1), noise)); - expectedConstraints.push_back( - JacobianFactor(X(2), -ones(1, 1), X(7), -ones(1, 1), zero(1), noise)); - EXPECT(assert_equal(expectedConstraints, *constraints)); + LinearInequalityFactorGraph expectedInequalities; + expectedInequalities.push_back( + LinearInequality(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), X(3), + -ones(1, 1), -1 * ones(1), 0)); + expectedInequalities.push_back( + LinearInequality(X(1), ones(1, 1), X(2), 2 * ones(1, 1), X(4), + -ones(1, 1), 6 * ones(1), 1)); + expectedInequalities.push_back( + LinearInequality(X(1), ones(1, 1), X(2), -2 * ones(1, 1), X(5), + -ones(1, 1), 2 * ones(1), 2)); + expectedInequalities.push_back( + LinearInequality(X(1), -ones(1, 1), X(6), -ones(1, 1), zero(1), 3)); + expectedInequalities.push_back( + LinearInequality(X(2), -ones(1, 1), X(7), -ones(1, 1), zero(1), 4)); + EXPECT(assert_equal(expectedInequalities, *inequalities)); bool isFeasible; VectorValues initialValues; boost::tie(isFeasible, initialValues) = solver.findFeasibleInitialValues(); - EXPECT(assert_equal(1.0*ones(1), initialValues.at(X(1)))); - EXPECT(assert_equal(0.0*ones(1), initialValues.at(X(2)))); + EXPECT(assert_equal(1.0 * ones(1), initialValues.at(X(1)))); + EXPECT(assert_equal(0.0 * ones(1), initialValues.at(X(2)))); VectorValues solution; boost::tie(solution, boost::tuples::ignore) = solver.optimize(); - EXPECT(assert_equal(2.0*ones(1), solution.at(X(1)))); - EXPECT(assert_equal(0.5*ones(1), solution.at(X(2)))); + EXPECT(assert_equal(2.0 * ones(1), solution.at(X(1)))); + EXPECT(assert_equal(0.5 * ones(1), solution.at(X(2)))); } TEST(QPSolver, optimizeNocedal06bookEx16_4_2) { - GaussianFactorGraph graph = createTestNocedal06bookEx16_4(); - QPSolver solver(graph); + QP qp = createTestNocedal06bookEx16_4(); + QPSolver solver(qp); VectorValues initialValues; initialValues.insert(X(1), (Vector(1) << 0.0)); initialValues.insert(X(2), (Vector(1) << 100.0)); @@ -391,17 +370,16 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4_2) { /* ************************************************************************* */ TEST(QPSolver, failedSubproblem) { - GaussianFactorGraph graph; - graph.push_back(JacobianFactor(X(1), eye(2), zero(2))); - graph.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0)); - graph.push_back( - JacobianFactor(X(1), (Matrix(1, 2) << -1.0, 0.0), -ones(1), - noiseModel::Constrained::MixedSigmas(-ones(1)))); + QP qp; + qp.cost.push_back(JacobianFactor(X(1), eye(2), zero(2))); + qp.cost.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0)); + qp.inequalities.push_back( + LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0), -ones(1), 0)); VectorValues expected; expected.insert(X(1), (Vector(2) << 1.0, 0.0)); - QPSolver solver(graph); + QPSolver solver(qp); VectorValues solution; boost::tie(solution, boost::tuples::ignore) = solver.optimize(); // graph.print("Graph: ");