Headers and standard formatting

release/4.3a0
dellaert 2014-11-26 09:04:34 +01:00
parent b9d0373c47
commit 7aaf6a1e82
5 changed files with 365 additions and 272 deletions

View File

@ -5,10 +5,12 @@
* @author: Duy-Nguyen Ta * @author: Duy-Nguyen Ta
*/ */
#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam/inference/Symbol.h>
#include <boost/format.hpp>
#include <boost/foreach.hpp> #include <boost/foreach.hpp>
#include <boost/range/adaptor/map.hpp> #include <boost/range/adaptor/map.hpp>
#include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/linear/LPSolver.h>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
@ -21,7 +23,7 @@ void LPSolver::buildMetaInformation() {
// Collect variables in objective function first // Collect variables in objective function first
BOOST_FOREACH(Key key, objectiveCoeffs_ | boost::adaptors::map_keys) { BOOST_FOREACH(Key key, objectiveCoeffs_ | boost::adaptors::map_keys) {
variableColumnNo_.insert(make_pair(key, firstVarIndex)); variableColumnNo_.insert(make_pair(key, firstVarIndex));
variableDims_.insert(make_pair(key,objectiveCoeffs_.dim(key))); variableDims_.insert(make_pair(key, objectiveCoeffs_.dim(key)));
firstVarIndex += variableDims_[key]; firstVarIndex += variableDims_[key];
freeVars_.insert(key); freeVars_.insert(key);
} }
@ -29,32 +31,32 @@ void LPSolver::buildMetaInformation() {
VariableIndex factorIndex(*constraints_); VariableIndex factorIndex(*constraints_);
BOOST_FOREACH(Key key, factorIndex | boost::adaptors::map_keys) { BOOST_FOREACH(Key key, factorIndex | boost::adaptors::map_keys) {
if (!variableColumnNo_.count(key)) { if (!variableColumnNo_.count(key)) {
JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<JacobianFactor> JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<
(constraints_->at(*factorIndex[key].begin())); JacobianFactor>(constraints_->at(*factorIndex[key].begin()));
if (!jacobian || !jacobian->isConstrained()) { if (!jacobian || !jacobian->isConstrained()) {
throw std::runtime_error("Invalid constrained graph!"); throw runtime_error("Invalid constrained graph!");
} }
size_t dim = jacobian->getDim(jacobian->find(key)); size_t dim = jacobian->getDim(jacobian->find(key));
variableColumnNo_.insert(make_pair(key, firstVarIndex)); variableColumnNo_.insert(make_pair(key, firstVarIndex));
variableDims_.insert(make_pair(key,dim)); variableDims_.insert(make_pair(key, dim));
firstVarIndex += variableDims_[key]; firstVarIndex += variableDims_[key];
freeVars_.insert(key); freeVars_.insert(key);
} }
} }
// Collect the remaining keys in lowerBounds // Collect the remaining keys in lowerBounds
BOOST_FOREACH(Key key, lowerBounds_ | boost::adaptors::map_keys){ BOOST_FOREACH(Key key, lowerBounds_ | boost::adaptors::map_keys) {
if (!variableColumnNo_.count(key)) { if (!variableColumnNo_.count(key)) {
variableColumnNo_.insert(make_pair(key, firstVarIndex)); variableColumnNo_.insert(make_pair(key, firstVarIndex));
variableDims_.insert(make_pair(key,lowerBounds_.dim(key))); variableDims_.insert(make_pair(key, lowerBounds_.dim(key)));
firstVarIndex += variableDims_[key]; firstVarIndex += variableDims_[key];
} }
freeVars_.erase(key); freeVars_.erase(key);
} }
// Collect the remaining keys in upperBounds // Collect the remaining keys in upperBounds
BOOST_FOREACH(Key key, upperBounds_ | boost::adaptors::map_keys){ BOOST_FOREACH(Key key, upperBounds_ | boost::adaptors::map_keys) {
if (!variableColumnNo_.count(key)) { if (!variableColumnNo_.count(key)) {
variableColumnNo_.insert(make_pair(key, firstVarIndex)); variableColumnNo_.insert(make_pair(key, firstVarIndex));
variableDims_.insert(make_pair(key,upperBounds_.dim(key))); variableDims_.insert(make_pair(key, upperBounds_.dim(key)));
firstVarIndex += variableDims_[key]; firstVarIndex += variableDims_[key];
} }
freeVars_.erase(key); freeVars_.erase(key);
@ -67,7 +69,7 @@ void LPSolver::buildMetaInformation() {
void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp, void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
const JacobianFactor::shared_ptr& jacobian) const { const JacobianFactor::shared_ptr& jacobian) const {
if (!jacobian || !jacobian->isConstrained()) if (!jacobian || !jacobian->isConstrained())
throw std::runtime_error("LP only accepts constrained factors!"); throw runtime_error("LP only accepts constrained factors!");
// Build column number from keys // Build column number from keys
KeyVector keys = jacobian->keys(); KeyVector keys = jacobian->keys();
@ -77,7 +79,7 @@ void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
Vector sigmas = jacobian->get_model()->sigmas(); Vector sigmas = jacobian->get_model()->sigmas();
Matrix A = jacobian->getA(); Matrix A = jacobian->getA();
Vector b = jacobian->getb(); Vector b = jacobian->getb();
for (int i = 0; i<A.rows(); ++i) { for (int i = 0; i < A.rows(); ++i) {
// A.row(i).data() gives wrong result so have to make a copy // A.row(i).data() gives wrong result so have to make a copy
// TODO: Why? Probably because lpsolve's add_constraintex modifies this raw buffer!!! // TODO: Why? Probably because lpsolve's add_constraintex modifies this raw buffer!!!
Vector r = A.row(i); Vector r = A.row(i);
@ -86,39 +88,39 @@ void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
// so we have to make a new copy for every row!!!!! // so we have to make a new copy for every row!!!!!
vector<int> columnNoCopy(columnNo); vector<int> columnNoCopy(columnNo);
if (sigmas[i]>0) { if (sigmas[i] > 0) {
cout << "Warning: Ignore Gaussian noise (sigma>0) in LP constraints!" << endl; cout << "Warning: Ignore Gaussian noise (sigma>0) in LP constraints!"
<< endl;
} }
int constraintType = (sigmas[i]<0)?LE:EQ; int constraintType = (sigmas[i] < 0) ? LE : EQ;
if(!add_constraintex(lp.get(), columnNoCopy.size(), r.data(), columnNoCopy.data(), if (!add_constraintex(lp.get(), columnNoCopy.size(), r.data(),
constraintType, b[i])) columnNoCopy.data(), constraintType, b[i]))
throw runtime_error("LP can't accept Gaussian noise!"); throw runtime_error("LP can't accept Gaussian noise!");
} }
} }
/* ************************************************************************* */ /* ************************************************************************* */
void LPSolver::addBounds(const boost::shared_ptr<lprec>& lp) const { void LPSolver::addBounds(const boost::shared_ptr<lprec>& lp) const {
// Set lower bounds // Set lower bounds
BOOST_FOREACH(Key key, lowerBounds_ | boost::adaptors::map_keys){ BOOST_FOREACH(Key key, lowerBounds_ | boost::adaptors::map_keys) {
Vector lb = lowerBounds_.at(key); Vector lb = lowerBounds_.at(key);
for (size_t i = 0; i<lb.size(); ++i) { for (size_t i = 0; i < lb.size(); ++i) {
set_lowbo(lp.get(), variableColumnNo_.at(key)+i, lb[i]); set_lowbo(lp.get(), variableColumnNo_.at(key) + i, lb[i]);
} }
} }
// Set upper bounds // Set upper bounds
BOOST_FOREACH(Key key, upperBounds_ | boost::adaptors::map_keys){ BOOST_FOREACH(Key key, upperBounds_ | boost::adaptors::map_keys) {
Vector ub = upperBounds_.at(key); Vector ub = upperBounds_.at(key);
for (size_t i = 0; i<ub.size(); ++i) { for (size_t i = 0; i < ub.size(); ++i) {
set_upbo(lp.get(), variableColumnNo_.at(key)+i, ub[i]); set_upbo(lp.get(), variableColumnNo_.at(key) + i, ub[i]);
} }
} }
// Set the rest as free variables // Set the rest as free variables
BOOST_FOREACH(Key key, freeVars_) { BOOST_FOREACH(Key key, freeVars_) {
for (size_t i = 0; i<variableDims_.at(key); ++i) { for (size_t i = 0; i < variableDims_.at(key); ++i) {
set_unbounded(lp.get(), variableColumnNo_.at(key)+i); set_unbounded(lp.get(), variableColumnNo_.at(key) + i);
} }
} }
} }
@ -132,8 +134,8 @@ boost::shared_ptr<lprec> LPSolver::buildModel() const {
// Add constraints // Add constraints
BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, *constraints_) { BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, *constraints_) {
JacobianFactor::shared_ptr jacobian = JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<
boost::dynamic_pointer_cast<JacobianFactor>(factor); JacobianFactor>(factor);
addConstraints(lp, jacobian); addConstraints(lp, jacobian);
} }
@ -152,8 +154,8 @@ boost::shared_ptr<lprec> LPSolver::buildModel() const {
Vector f = objectiveCoeffs_.vector(keys); Vector f = objectiveCoeffs_.vector(keys);
vector<int> columnNo = buildColumnNo(keys); vector<int> columnNo = buildColumnNo(keys);
if(!set_obj_fnex(lp.get(), f.size(), f.data(), columnNo.data())) if (!set_obj_fnex(lp.get(), f.size(), f.data(), columnNo.data()))
throw std::runtime_error("lpsolve cannot set obj function!"); throw runtime_error("lpsolve cannot set obj function!");
// Set the object direction to minimize // Set the object direction to minimize
set_minim(lp.get()); set_minim(lp.get());
@ -169,7 +171,8 @@ VectorValues LPSolver::convertToVectorValues(REAL* row) const {
VectorValues values; VectorValues values;
BOOST_FOREACH(Key key, variableColumnNo_ | boost::adaptors::map_keys) { BOOST_FOREACH(Key key, variableColumnNo_ | boost::adaptors::map_keys) {
// Warning: the columnNo starts from 1, but C's array index starts from 0!! // Warning: the columnNo starts from 1, but C's array index starts from 0!!
Vector v = Eigen::Map<Eigen::VectorXd>(&row[variableColumnNo_.at(key)-1], variableDims_.at(key)); Vector v = Eigen::Map<Eigen::VectorXd>(&row[variableColumnNo_.at(key) - 1],
variableDims_.at(key));
values.insert(key, v); values.insert(key, v);
} }
return values; return values;
@ -183,13 +186,15 @@ VectorValues LPSolver::solve() const {
/* just out of curiosity, now show the model in lp format on screen */ /* just out of curiosity, now show the model in lp format on screen */
/* this only works if this is a console application. If not, use write_lp and a filename */ /* this only works if this is a console application. If not, use write_lp and a filename */
if (debug) write_LP(lp.get(), stdout); if (debug)
write_LP(lp.get(), stdout);
int ret = ::solve(lp.get()); int ret = ::solve(lp.get());
if (ret != 0) { if (ret != 0) {
throw std::runtime_error( throw runtime_error(
(boost::format( "lpsolve cannot find the optimal solution and terminates with %d error. "\ (boost::format(
"See lpsolve's solve() documentation for details.") % ret).str()); "lpsolve cannot find the optimal solution and terminates with %d error. "
"See lpsolve's solve() documentation for details.") % ret).str());
} }
REAL* row = NULL; REAL* row = NULL;
get_ptr_variables(lp.get(), &row); get_ptr_variables(lp.get(), &row);

View File

@ -7,13 +7,14 @@
#pragma once #pragma once
#include <boost/range/irange.hpp>
#include <gtsam/3rdparty/lp_solve_5.5/lp_lib.h>
#include <gtsam/linear/GaussianFactorGraph.h> #include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h> #include <gtsam/linear/VectorValues.h>
#include <gtsam/inference/Symbol.h> #include <gtsam/inference/Symbol.h>
#include <gtsam/3rdparty/lp_solve_5.5/lp_lib.h>
#include <boost/range/irange.hpp>
namespace gtsam { namespace gtsam {
/** /**
@ -36,25 +37,35 @@ public:
* We do NOT adopt this convention here. If no lower/upper bounds are specified, the variable will be * We do NOT adopt this convention here. If no lower/upper bounds are specified, the variable will be
* set as unbounded, i.e. -inf <= x <= inf. * set as unbounded, i.e. -inf <= x <= inf.
*/ */
LPSolver(const VectorValues& objectiveCoeffs, const GaussianFactorGraph::shared_ptr& constraints, LPSolver(const VectorValues& objectiveCoeffs,
const VectorValues& lowerBounds = VectorValues(), const VectorValues& upperBounds = VectorValues()) : const GaussianFactorGraph::shared_ptr& constraints,
objectiveCoeffs_(objectiveCoeffs), constraints_(constraints), lowerBounds_( const VectorValues& lowerBounds = VectorValues(),
lowerBounds), upperBounds_(upperBounds) { const VectorValues& upperBounds = VectorValues()) :
objectiveCoeffs_(objectiveCoeffs), constraints_(constraints), lowerBounds_(
lowerBounds), upperBounds_(upperBounds) {
buildMetaInformation(); buildMetaInformation();
} }
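For orientation, a minimal usage sketch (the one-variable LP and the key X(1) are made up for illustration; only the constructor signature and the negative-sigma convention come from the code in this commit):

// Hypothetical LP: minimize 2*x  subject to  x <= 4  and  x >= 1.
VectorValues objective;
objective.insert(X(1), (Vector(1) << 2.0));    // coefficient of x in f'x
GaussianFactorGraph::shared_ptr constraints(new GaussianFactorGraph());
// A row of Ax - b with a negative sigma is read as the inequality x - 4 <= 0
constraints->push_back(JacobianFactor(X(1), ones(1, 1), 4.0 * ones(1),
    noiseModel::Constrained::MixedSigmas((Vector(1) << -1))));
VectorValues lowerBounds;
lowerBounds.insert(X(1), ones(1));             // x >= 1, no upper bound given
LPSolver solver(objective, constraints, lowerBounds);
VectorValues solution = solver.solve();        // should return x = 1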
/** /**
* Build data structures to support converting between gtsam and lpsolve * Build data structures to support converting between gtsam and lpsolve
* TODO: consider lp as a class variable and support setConstraints * TODO: consider lp as a class variable and support setConstraints
* to avoid rebuilding this metadata * to avoid rebuilding this metadata
*/ */
void buildMetaInformation(); void buildMetaInformation();
/// Get functions for unittest checking /// Get functions for unittest checking
const std::map<Key, size_t>& variableColumnNo() const { return variableColumnNo_; } const std::map<Key, size_t>& variableColumnNo() const {
const std::map<Key, size_t>& variableDims() const { return variableDims_; } return variableColumnNo_;
size_t nrColumns() const {return nrColumns_;} }
const KeySet& freeVars() const { return freeVars_; } const std::map<Key, size_t>& variableDims() const {
return variableDims_;
}
size_t nrColumns() const {
return nrColumns_;
}
const KeySet& freeVars() const {
return freeVars_;
}
/** /**
* Build lpsolve's column number for a list of keys * Build lpsolve's column number for a list of keys
@ -64,7 +75,8 @@ public:
std::vector<int> columnNo; std::vector<int> columnNo;
BOOST_FOREACH(Key key, keyList) { BOOST_FOREACH(Key key, keyList) {
std::vector<int> varIndices = boost::copy_range<std::vector<int> >( std::vector<int> varIndices = boost::copy_range<std::vector<int> >(
boost::irange(variableColumnNo_.at(key), variableColumnNo_.at(key) + variableDims_.at(key))); boost::irange(variableColumnNo_.at(key),
variableColumnNo_.at(key) + variableDims_.at(key)));
columnNo.insert(columnNo.end(), varIndices.begin(), varIndices.end()); columnNo.insert(columnNo.end(), varIndices.begin(), varIndices.end());
} }
return columnNo; return columnNo;
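To make the column bookkeeping concrete (the keys and dimensions below are hypothetical): lpsolve columns are 1-based and each variable owns a contiguous block of them, which is also why convertToVectorValues() in LPSolver.cpp subtracts 1 when reading lpsolve's row buffer.

// Suppose buildMetaInformation() assigned  x -> column 1 with dim 2  and  y -> column 3 with dim 3.
// Then, for the key list {y, x}, buildColumnNo() returns {3, 4, 5, 1, 2}: y's block followed by x's block.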
@ -82,7 +94,6 @@ public:
*/ */
void addBounds(const boost::shared_ptr<lprec>& lp) const; void addBounds(const boost::shared_ptr<lprec>& lp) const;
/** /**
* Main function to build lpsolve model * Main function to build lpsolve model
* TODO: consider lp as a class variable and support setConstraints * TODO: consider lp as a class variable and support setConstraints

View File

@ -5,19 +5,20 @@
* @author: thduynguyen * @author: thduynguyen
*/ */
#include <boost/foreach.hpp>
#include <boost/range/adaptor/map.hpp>
#include <gtsam/inference/Symbol.h> #include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/linear/QPSolver.h> #include <gtsam_unstable/linear/QPSolver.h>
#include <gtsam_unstable/linear/LPSolver.h> #include <gtsam_unstable/linear/LPSolver.h>
#include <boost/foreach.hpp>
#include <boost/range/adaptor/map.hpp>
using namespace std; using namespace std;
namespace gtsam { namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
QPSolver::QPSolver(const GaussianFactorGraph& graph) : QPSolver::QPSolver(const GaussianFactorGraph& graph) :
graph_(graph), fullFactorIndices_(graph) { graph_(graph), fullFactorIndices_(graph) {
// Split the original graph into unconstrained and constrained part // Split the original graph into unconstrained and constrained part
// and collect indices of constrained factors // and collect indices of constrained factors
for (size_t i = 0; i < graph.nrFactors(); ++i) { for (size_t i = 0; i < graph.nrFactors(); ++i) {
@ -30,21 +31,21 @@ QPSolver::QPSolver(const GaussianFactorGraph& graph) :
} }
// Collect constrained variable keys // Collect constrained variable keys
std::set<size_t> constrainedVars; set<size_t> constrainedVars;
BOOST_FOREACH(size_t index, constraintIndices_) { BOOST_FOREACH(size_t index, constraintIndices_) {
KeyVector keys = graph.at(index)->keys(); KeyVector keys = graph.at(index)->keys();
constrainedVars.insert(keys.begin(), keys.end()); constrainedVars.insert(keys.begin(), keys.end());
} }
// Collect unconstrained hessians of constrained vars to build dual graph // Collect unconstrained hessians of constrained vars to build dual graph
freeHessians_ = unconstrainedHessiansOfConstrainedVars(graph, constrainedVars); freeHessians_ = unconstrainedHessiansOfConstrainedVars(graph,
constrainedVars);
freeHessianFactorIndex_ = VariableIndex(*freeHessians_); freeHessianFactorIndex_ = VariableIndex(*freeHessians_);
} }
/* ************************************************************************* */ /* ************************************************************************* */
GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars( GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars(
const GaussianFactorGraph& graph, const std::set<Key>& constrainedVars) const { const GaussianFactorGraph& graph, const set<Key>& constrainedVars) const {
VariableIndex variableIndex(graph); VariableIndex variableIndex(graph);
GaussianFactorGraph::shared_ptr hfg(new GaussianFactorGraph()); GaussianFactorGraph::shared_ptr hfg(new GaussianFactorGraph());
// Collect all factors involving constrained vars // Collect all factors involving constrained vars
@ -58,7 +59,8 @@ GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars
// Convert each factor into Hessian // Convert each factor into Hessian
BOOST_FOREACH(size_t factorIndex, factors) { BOOST_FOREACH(size_t factorIndex, factors) {
if (!graph[factorIndex]) continue; if (!graph[factorIndex])
continue;
// See if this is a Jacobian factor // See if this is a Jacobian factor
JacobianFactor::shared_ptr jf = toJacobian(graph[factorIndex]); JacobianFactor::shared_ptr jf = toJacobian(graph[factorIndex]);
if (jf) { if (jf) {
@ -68,20 +70,21 @@ GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars
Vector sigmas = jf->get_model()->sigmas(); Vector sigmas = jf->get_model()->sigmas();
Vector newPrecisions(sigmas.size()); Vector newPrecisions(sigmas.size());
bool mixed = false; bool mixed = false;
for (size_t s=0; s<sigmas.size(); ++s) { for (size_t s = 0; s < sigmas.size(); ++s) {
if (sigmas[s] <= 1e-9) newPrecisions[s] = 0.0; // 0 info for constraints (both ineq and eq) if (sigmas[s] <= 1e-9)
newPrecisions[s] = 0.0; // 0 info for constraints (both ineq and eq)
else { else {
newPrecisions[s] = 1.0/sigmas[s]; newPrecisions[s] = 1.0 / sigmas[s];
mixed = true; mixed = true;
} }
} }
if (mixed) { // only add free hessians if it's mixed if (mixed) { // only add free hessians if it's mixed
JacobianFactor::shared_ptr newJacobian = toJacobian(jf->clone()); JacobianFactor::shared_ptr newJacobian = toJacobian(jf->clone());
newJacobian->setModel(noiseModel::Diagonal::Precisions(newPrecisions)); newJacobian->setModel(
noiseModel::Diagonal::Precisions(newPrecisions));
hfg->push_back(HessianFactor(*newJacobian)); hfg->push_back(HessianFactor(*newJacobian));
} }
} } else { // unconstrained Jacobian
else { // unconstrained Jacobian
// Convert the original linear factor to Hessian factor // Convert the original linear factor to Hessian factor
// TODO: This may fail and throw the following exception // TODO: This may fail and throw the following exception
// Assertion failed: (((!PanelMode) && stride==0 && offset==0) || // Assertion failed: (((!PanelMode) && stride==0 && offset==0) ||
@ -92,8 +95,7 @@ GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars
// My current way to fix this is to compile both gtsam and my library in Release mode // My current way to fix this is to compile both gtsam and my library in Release mode
hfg->add(HessianFactor(*jf)); hfg->add(HessianFactor(*jf));
} }
} } else { // If it's not a Jacobian, it should be a hessian factor. Just add!
else { // If it's not a Jacobian, it should be a hessian factor. Just add!
hfg->push_back(graph[factorIndex]); hfg->push_back(graph[factorIndex]);
} }
} }
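A concrete reading of the mixed-sigma branch above (the numbers are made up): a constrained row model with sigmas (0, -1, 0.2) becomes newPrecisions = (0, 0, 1/0.2 = 5), i.e. the equality row (sigma = 0) and the inequality row (sigma = -1) contribute no information, and because one row is still informative ("mixed"), a re-weighted clone of the Jacobian is converted to a HessianFactor and pushed onto the free-Hessian graph.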
@ -111,17 +113,23 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
// For each variable xi involved in some constraint, compute the unconstrained gradient // For each variable xi involved in some constraint, compute the unconstrained gradient
// wrt xi from the prebuilt freeHessian graph // wrt xi from the prebuilt freeHessian graph
// \grad f(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi // \grad f(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi
if (debug) freeHessianFactorIndex_.print("freeHessianFactorIndex_: "); if (debug)
freeHessianFactorIndex_.print("freeHessianFactorIndex_: ");
BOOST_FOREACH(const VariableIndex::value_type& xiKey_factors, freeHessianFactorIndex_) { BOOST_FOREACH(const VariableIndex::value_type& xiKey_factors, freeHessianFactorIndex_) {
Key xiKey = xiKey_factors.first; Key xiKey = xiKey_factors.first;
VariableIndex::Factors xiFactors = xiKey_factors.second; VariableIndex::Factors xiFactors = xiKey_factors.second;
// Find xi's dim from the first factor on xi // Find xi's dim from the first factor on xi
if (xiFactors.size() == 0) continue; if (xiFactors.size() == 0)
GaussianFactor::shared_ptr xiFactor0 = freeHessians_->at(*xiFactors.begin()); continue;
GaussianFactor::shared_ptr xiFactor0 = freeHessians_->at(
*xiFactors.begin());
size_t xiDim = xiFactor0->getDim(xiFactor0->find(xiKey)); size_t xiDim = xiFactor0->getDim(xiFactor0->find(xiKey));
if (debug) xiFactor0->print("xiFactor0: "); if (debug)
if (debug) cout << "xiKey: " << string(Symbol(xiKey)) << ", xiDim: " << xiDim << endl; xiFactor0->print("xiFactor0: ");
if (debug)
cout << "xiKey: " << string(Symbol(xiKey)) << ", xiDim: " << xiDim
<< endl;
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++// //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// Compute the b-vector for the dual factor Ax-b // Compute the b-vector for the dual factor Ax-b
@ -139,8 +147,7 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
if (xi > xj) { if (xi > xj) {
Matrix Gji = factor->info(xj, xi); Matrix Gji = factor->info(xj, xi);
Gij = Gji.transpose(); Gij = Gji.transpose();
} } else {
else {
Gij = factor->info(xi, xj); Gij = factor->info(xi, xj);
} }
// Accumulate Gij*xj to gradf // Accumulate Gij*xj to gradf
@ -155,20 +162,22 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
// Compute the Jacobian A for the dual factor Ax-b // Compute the Jacobian A for the dual factor Ax-b
// Obtain the jacobians for lambda variables from their corresponding constraints // Obtain the jacobians for lambda variables from their corresponding constraints
// A = gradc_k(xi) = \frac{\partial c_k}{\partial xi}' // A = gradc_k(xi) = \frac{\partial c_k}{\partial xi}'
std::vector<std::pair<Key, Matrix> > lambdaTerms; // collection of lambda_k, and gradc_k vector<pair<Key, Matrix> > lambdaTerms; // collection of lambda_k, and gradc_k
typedef std::pair<size_t, size_t> FactorIx_SigmaIx; typedef pair<size_t, size_t> FactorIx_SigmaIx;
std::vector<FactorIx_SigmaIx> unconstrainedIndex; // pairs of factorIx,sigmaIx of unconstrained rows vector<FactorIx_SigmaIx> unconstrainedIndex; // pairs of factorIx,sigmaIx of unconstrained rows
BOOST_FOREACH(size_t factorIndex, fullFactorIndices_[xiKey]) { BOOST_FOREACH(size_t factorIndex, fullFactorIndices_[xiKey]) {
JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIndex)); JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIndex));
if (!factor || !factor->isConstrained()) continue; if (!factor || !factor->isConstrained())
continue;
// Gradient is the transpose of the Jacobian: A_k = gradc_k(xi) = \frac{\partial c_k}{\partial xi}' // Gradient is the transpose of the Jacobian: A_k = gradc_k(xi) = \frac{\partial c_k}{\partial xi}'
// Each column for each lambda_k corresponds to [the transpose of] each constrained row factor // Each column for each lambda_k corresponds to [the transpose of] each constrained row factor
Matrix A_k = factor->getA(factor->find(xiKey)).transpose(); Matrix A_k = factor->getA(factor->find(xiKey)).transpose();
if (debug) gtsam::print(A_k, "A_k = "); if (debug)
gtsam::print(A_k, "A_k = ");
// Deal with mixed sigmas: no information if sigma != 0 // Deal with mixed sigmas: no information if sigma != 0
Vector sigmas = factor->get_model()->sigmas(); Vector sigmas = factor->get_model()->sigmas();
for (size_t sigmaIx = 0; sigmaIx<sigmas.size(); ++sigmaIx) { for (size_t sigmaIx = 0; sigmaIx < sigmas.size(); ++sigmaIx) {
// if it's either ineq (sigma<0) or unconstrained (sigma>0) // if it's either ineq (sigma<0) or unconstrained (sigma>0)
// we have no information about it // we have no information about it
if (fabs(sigmas[sigmaIx]) > 1e-9) { if (fabs(sigmas[sigmaIx]) > 1e-9) {
@ -185,31 +194,37 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++// //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// Create and add factors to the dual graph // Create and add factors to the dual graph
// If least square approximation is desired, use unit noise model. // If least square approximation is desired, use unit noise model.
if (debug) cout << "Create dual factor" << endl; if (debug)
cout << "Create dual factor" << endl;
if (useLeastSquare) { if (useLeastSquare) {
if (debug) cout << "use least square!" << endl; if (debug)
dualGraph.push_back(JacobianFactor(lambdaTerms, gradf_xi, cout << "use least square!" << endl;
noiseModel::Unit::Create(gradf_xi.size()))); dualGraph.push_back(
} JacobianFactor(lambdaTerms, gradf_xi,
else { noiseModel::Unit::Create(gradf_xi.size())));
} else {
// Enforce constrained noise model so lambdas are solved with QR // Enforce constrained noise model so lambdas are solved with QR
// and should exactly satisfy all the equations // and should exactly satisfy all the equations
if (debug) cout << gradf_xi << endl; if (debug)
dualGraph.push_back(JacobianFactor(lambdaTerms, gradf_xi, cout << gradf_xi << endl;
noiseModel::Constrained::All(gradf_xi.size()))); dualGraph.push_back(
JacobianFactor(lambdaTerms, gradf_xi,
noiseModel::Constrained::All(gradf_xi.size())));
} }
// Add 0 priors on all lambdas of the unconstrained rows to make sure the graph is solvable // Add 0 priors on all lambdas of the unconstrained rows to make sure the graph is solvable
if (debug) cout << "Create priors" << endl; if (debug)
cout << "Create priors" << endl;
BOOST_FOREACH(FactorIx_SigmaIx factorIx_sigmaIx, unconstrainedIndex) { BOOST_FOREACH(FactorIx_SigmaIx factorIx_sigmaIx, unconstrainedIndex) {
size_t factorIx = factorIx_sigmaIx.first; size_t factorIx = factorIx_sigmaIx.first;
JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIx)); JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIx));
size_t dim= factor->get_model()->dim(); size_t dim = factor->get_model()->dim();
Matrix J = zeros(dim, dim); Matrix J = zeros(dim, dim);
size_t sigmaIx = factorIx_sigmaIx.second; size_t sigmaIx = factorIx_sigmaIx.second;
J(sigmaIx,sigmaIx) = 1.0; J(sigmaIx, sigmaIx) = 1.0;
// Use factorIndex as the lambda's key. // Use factorIndex as the lambda's key.
if (debug) cout << "prior for factor " << factorIx << endl; if (debug)
cout << "prior for factor " << factorIx << endl;
dualGraph.push_back(JacobianFactor(factorIx, J, zero(dim))); dualGraph.push_back(JacobianFactor(factorIx, J, zero(dim)));
} }
} }
@ -218,7 +233,8 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
} }
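Putting the pieces of buildDualGraph() together, my reading is that each dual factor enforces the stationarity condition of the Lagrangian for one constrained variable x_i, with one multiplier vector lambda_k per constrained factor (keyed by that factor's index):

\[
\sum_k \big(\nabla_{x_i} c_k(x_0)\big)^{\top} \lambda_k
  \;=\; \nabla_{x_i} f(x_0)
  \;=\; \sum_j G_{ij}\, x_{0,j} - g_i ,
\]

while the zero-priors added above keep the system solvable for the lambda entries that belong to unconstrained rows.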
/* ************************************************************************* */ /* ************************************************************************* */
std::pair<int, int> QPSolver::findWorstViolatedActiveIneq(const VectorValues& lambdas) const { pair<int, int> QPSolver::findWorstViolatedActiveIneq(
const VectorValues& lambdas) const {
int worstFactorIx = -1, worstSigmaIx = -1; int worstFactorIx = -1, worstSigmaIx = -1;
// preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either // preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either
// inactive or a good ineq constraint, so we don't care! // inactive or a good ineq constraint, so we don't care!
@ -226,9 +242,9 @@ std::pair<int, int> QPSolver::findWorstViolatedActiveIneq(const VectorValues& la
BOOST_FOREACH(size_t factorIx, constraintIndices_) { BOOST_FOREACH(size_t factorIx, constraintIndices_) {
Vector lambda = lambdas.at(factorIx); Vector lambda = lambdas.at(factorIx);
Vector orgSigmas = toJacobian(graph_.at(factorIx))->get_model()->sigmas(); Vector orgSigmas = toJacobian(graph_.at(factorIx))->get_model()->sigmas();
for (size_t j = 0; j<orgSigmas.size(); ++j) for (size_t j = 0; j < orgSigmas.size(); ++j)
// If it is a BAD active inequality, and lambda is larger than the current max // If it is a BAD active inequality, and lambda is larger than the current max
if (orgSigmas[j]<0 && lambda[j] > maxLambda) { if (orgSigmas[j] < 0 && lambda[j] > maxLambda) {
worstFactorIx = factorIx; worstFactorIx = factorIx;
worstSigmaIx = j; worstSigmaIx = j;
maxLambda = lambda[j]; maxLambda = lambda[j];
@ -237,9 +253,9 @@ std::pair<int, int> QPSolver::findWorstViolatedActiveIneq(const VectorValues& la
return make_pair(worstFactorIx, worstSigmaIx); return make_pair(worstFactorIx, worstSigmaIx);
} }
/* ************************************************************************* */ /* ************************************************************************* */bool QPSolver::updateWorkingSetInplace(
bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph, GaussianFactorGraph& workingGraph, int factorIx, int sigmaIx,
int factorIx, int sigmaIx, double newSigma) const { double newSigma) const {
if (factorIx < 0 || sigmaIx < 0) if (factorIx < 0 || sigmaIx < 0)
return false; return false;
Vector sigmas = toJacobian(workingGraph.at(factorIx))->get_model()->sigmas(); Vector sigmas = toJacobian(workingGraph.at(factorIx))->get_model()->sigmas();
@ -264,8 +280,9 @@ bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph,
* *
* We want the minimum of all those alphas among all inactive ineq. * We want the minimum of all those alphas among all inactive ineq.
*/ */
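In equation form, the bound computed below for each inactive inequality a_j'x <= b_j (it only binds when a_j'p > 0, which is why other rows are skipped) is

\[
a_j^{\top}(x_k + \alpha p) \le b_j
\;\Longrightarrow\;
\alpha \le \alpha_j = \frac{b_j - a_j^{\top} x_k}{a_j^{\top} p},
\qquad
\alpha^{*} = \min\Big(1,\; \min_{j:\; a_j^{\top} p > 0} \alpha_j\Big).
\]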
boost::tuple<double, int, int> QPSolver::computeStepSize(const GaussianFactorGraph& workingGraph, boost::tuple<double, int, int> QPSolver::computeStepSize(
const VectorValues& xk, const VectorValues& p) const { const GaussianFactorGraph& workingGraph, const VectorValues& xk,
const VectorValues& p) const {
static bool debug = false; static bool debug = false;
double minAlpha = 1.0; double minAlpha = 1.0;
@ -274,33 +291,39 @@ boost::tuple<double, int, int> QPSolver::computeStepSize(const GaussianFactorGra
JacobianFactor::shared_ptr jacobian = toJacobian(workingGraph.at(factorIx)); JacobianFactor::shared_ptr jacobian = toJacobian(workingGraph.at(factorIx));
Vector sigmas = jacobian->get_model()->sigmas(); Vector sigmas = jacobian->get_model()->sigmas();
Vector b = jacobian->getb(); Vector b = jacobian->getb();
for (size_t s = 0; s<sigmas.size(); ++s) { for (size_t s = 0; s < sigmas.size(); ++s) {
// If it is an inactive inequality, compute alpha and update min // If it is an inactive inequality, compute alpha and update min
if (sigmas[s]<0) { if (sigmas[s] < 0) {
// Compute aj'*p // Compute aj'*p
double ajTp = 0.0; double ajTp = 0.0;
for (Factor::const_iterator xj = jacobian->begin(); xj != jacobian->end(); ++xj) { for (Factor::const_iterator xj = jacobian->begin();
xj != jacobian->end(); ++xj) {
Vector pj = p.at(*xj); Vector pj = p.at(*xj);
Vector aj = jacobian->getA(xj).row(s); Vector aj = jacobian->getA(xj).row(s);
ajTp += aj.dot(pj); ajTp += aj.dot(pj);
} }
if (debug) cout << "s, ajTp, b[s]: " << s << " " << ajTp << " " << b[s] << endl; if (debug)
cout << "s, ajTp, b[s]: " << s << " " << ajTp << " " << b[s] << endl;
// Check if aj'*p >0. Don't care if it's not. // Check if aj'*p >0. Don't care if it's not.
if (ajTp<=0) continue; if (ajTp <= 0)
continue;
// Compute aj'*xk // Compute aj'*xk
double ajTx = 0.0; double ajTx = 0.0;
for (Factor::const_iterator xj = jacobian->begin(); xj != jacobian->end(); ++xj) { for (Factor::const_iterator xj = jacobian->begin();
xj != jacobian->end(); ++xj) {
Vector xkj = xk.at(*xj); Vector xkj = xk.at(*xj);
Vector aj = jacobian->getA(xj).row(s); Vector aj = jacobian->getA(xj).row(s);
ajTx += aj.dot(xkj); ajTx += aj.dot(xkj);
} }
if (debug) cout << "b[s], ajTx: " << b[s] << " " << ajTx << " " << ajTp << endl; if (debug)
cout << "b[s], ajTx: " << b[s] << " " << ajTx << " " << ajTp << endl;
// alpha = (bj - aj'*xk) / (aj'*p) // alpha = (bj - aj'*xk) / (aj'*p)
double alpha = (b[s] - ajTx)/ajTp; double alpha = (b[s] - ajTx) / ajTp;
if (debug) cout << "alpha: " << alpha << endl; if (debug)
cout << "alpha: " << alpha << endl;
// We want the minimum of all those max alphas // We want the minimum of all those max alphas
if (alpha < minAlpha) { if (alpha < minAlpha) {
@ -314,41 +337,52 @@ boost::tuple<double, int, int> QPSolver::computeStepSize(const GaussianFactorGra
return boost::make_tuple(minAlpha, closestFactorIx, closestSigmaIx); return boost::make_tuple(minAlpha, closestFactorIx, closestSigmaIx);
} }
/* ************************************************************************* */ /* ************************************************************************* */bool QPSolver::iterateInPlace(
bool QPSolver::iterateInPlace(GaussianFactorGraph& workingGraph, VectorValues& currentSolution, VectorValues& lambdas) const { GaussianFactorGraph& workingGraph, VectorValues& currentSolution,
VectorValues& lambdas) const {
static bool debug = false; static bool debug = false;
if (debug) workingGraph.print("workingGraph: "); if (debug)
workingGraph.print("workingGraph: ");
// Obtain the solution from the current working graph // Obtain the solution from the current working graph
VectorValues newSolution = workingGraph.optimize(); VectorValues newSolution = workingGraph.optimize();
if (debug) newSolution.print("New solution:"); if (debug)
newSolution.print("New solution:");
// If we CAN'T move further // If we CAN'T move further
if (newSolution.equals(currentSolution, 1e-5)) { if (newSolution.equals(currentSolution, 1e-5)) {
// Compute lambda from the dual graph // Compute lambda from the dual graph
if (debug) cout << "Building dual graph..." << endl; if (debug)
cout << "Building dual graph..." << endl;
GaussianFactorGraph dualGraph = buildDualGraph(workingGraph, newSolution); GaussianFactorGraph dualGraph = buildDualGraph(workingGraph, newSolution);
if (debug) dualGraph.print("Dual graph: "); if (debug)
dualGraph.print("Dual graph: ");
lambdas = dualGraph.optimize(); lambdas = dualGraph.optimize();
if (debug) lambdas.print("lambdas :"); if (debug)
lambdas.print("lambdas :");
int factorIx, sigmaIx; int factorIx, sigmaIx;
boost::tie(factorIx, sigmaIx) = findWorstViolatedActiveIneq(lambdas); boost::tie(factorIx, sigmaIx) = findWorstViolatedActiveIneq(lambdas);
if (debug) cout << "violated active ineq - factorIx, sigmaIx: " << factorIx << " " << sigmaIx << endl; if (debug)
cout << "violated active ineq - factorIx, sigmaIx: " << factorIx << " "
<< sigmaIx << endl;
// Try to deactivate the weakest violated ineq constraints // Try to deactivate the weakest violated ineq constraints
// if not successful, i.e. all ineq constraints are satisfied: We have the solution!! // if not successful, i.e. all ineq constraints are satisfied: We have the solution!!
if (!updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, -1.0)) if (!updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, -1.0))
return true; return true;
} } else {
else {
// If we CAN make some progress // If we CAN make some progress
// Adapt stepsize if some inactive inequality constraints complain about this move // Adapt stepsize if some inactive inequality constraints complain about this move
if (debug) cout << "Computing stepsize..." << endl; if (debug)
cout << "Computing stepsize..." << endl;
double alpha; double alpha;
int factorIx, sigmaIx; int factorIx, sigmaIx;
VectorValues p = newSolution - currentSolution; VectorValues p = newSolution - currentSolution;
boost::tie(alpha, factorIx, sigmaIx) = computeStepSize(workingGraph, currentSolution, p); boost::tie(alpha, factorIx, sigmaIx) = computeStepSize(workingGraph,
if (debug) cout << "alpha, factorIx, sigmaIx: " << alpha << " " << factorIx << " " << sigmaIx << endl; currentSolution, p);
if (debug)
cout << "alpha, factorIx, sigmaIx: " << alpha << " " << factorIx << " "
<< sigmaIx << endl;
// also add to the working set the one that complains the most // also add to the working set the one that complains the most
updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, 0.0); updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, 0.0);
// step! // step!
@ -367,7 +401,8 @@ bool QPSolver::iterateInPlace(GaussianFactorGraph& workingGraph, VectorValues& c
} }
/* ************************************************************************* */ /* ************************************************************************* */
std::pair<VectorValues, VectorValues> QPSolver::optimize(const VectorValues& initials) const { pair<VectorValues, VectorValues> QPSolver::optimize(
const VectorValues& initials) const {
GaussianFactorGraph workingGraph = graph_.clone(); GaussianFactorGraph workingGraph = graph_.clone();
VectorValues currentSolution = initials; VectorValues currentSolution = initials;
VectorValues lambdas; VectorValues lambdas;
@ -379,10 +414,11 @@ std::pair<VectorValues, VectorValues> QPSolver::optimize(const VectorValues& ini
} }
/* ************************************************************************* */ /* ************************************************************************* */
std::pair<VectorValues, Key> QPSolver::initialValuesLP() const { pair<VectorValues, Key> QPSolver::initialValuesLP() const {
size_t firstSlackKey = 0; size_t firstSlackKey = 0;
BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) { BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) {
if (firstSlackKey < key) firstSlackKey = key; if (firstSlackKey < key)
firstSlackKey = key;
} }
firstSlackKey += 1; firstSlackKey += 1;
@ -406,9 +442,9 @@ std::pair<VectorValues, Key> QPSolver::initialValuesLP() const {
Vector errorAtZero = jacobian->getb(); Vector errorAtZero = jacobian->getb();
Vector slackInit = zero(errorAtZero.size()); Vector slackInit = zero(errorAtZero.size());
Vector sigmas = jacobian->get_model()->sigmas(); Vector sigmas = jacobian->get_model()->sigmas();
for (size_t i = 0; i<sigmas.size(); ++i) { for (size_t i = 0; i < sigmas.size(); ++i) {
if (sigmas[i] < 0) { if (sigmas[i] < 0) {
slackInit[i] = std::max(errorAtZero[i], 0.0); slackInit[i] = max(errorAtZero[i], 0.0);
} else if (sigmas[i] == 0.0) { } else if (sigmas[i] == 0.0) {
errorAtZero[i] = fabs(errorAtZero[i]); errorAtZero[i] = fabs(errorAtZero[i]);
} // if it has >0 sigma, i.e. normal Gaussian noise, initialize it at 0 } // if it has >0 sigma, i.e. normal Gaussian noise, initialize it at 0
@ -439,18 +475,19 @@ VectorValues QPSolver::objectiveCoeffsLP(Key firstSlackKey) const {
} }
/* ************************************************************************* */ /* ************************************************************************* */
std::pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP( pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP(
Key firstSlackKey) const { Key firstSlackKey) const {
// Create constraints and 0 lower bounds (zi>=0) // Create constraints and 0 lower bounds (zi>=0)
GaussianFactorGraph::shared_ptr constraints(new GaussianFactorGraph()); GaussianFactorGraph::shared_ptr constraints(new GaussianFactorGraph());
VectorValues slackLowerBounds; VectorValues slackLowerBounds;
for (size_t key = firstSlackKey; key<firstSlackKey + constraintIndices_.size(); ++key) { for (size_t key = firstSlackKey;
size_t iFactor = constraintIndices_[key-firstSlackKey]; key < firstSlackKey + constraintIndices_.size(); ++key) {
size_t iFactor = constraintIndices_[key - firstSlackKey];
JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor)); JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor));
// Collect old terms to form a new factor // Collect old terms to form a new factor
// TODO: it might be faster if we can get the whole block matrix at once // TODO: it might be faster if we can get the whole block matrix at once
// but I don't know how to extend the current VerticalBlockMatrix // but I don't know how to extend the current VerticalBlockMatrix
std::vector<std::pair<Key, Matrix> > terms; vector<pair<Key, Matrix> > terms;
for (Factor::iterator it = jacobian->begin(); it != jacobian->end(); ++it) { for (Factor::iterator it = jacobian->begin(); it != jacobian->end(); ++it) {
terms.push_back(make_pair(*it, jacobian->getA(it))); terms.push_back(make_pair(*it, jacobian->getA(it)));
} }
@ -459,7 +496,8 @@ std::pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP
// LE constraints ax <= b for sigma < 0. // LE constraints ax <= b for sigma < 0.
size_t dim = jacobian->rows(); size_t dim = jacobian->rows();
terms.push_back(make_pair(key, -eye(dim))); terms.push_back(make_pair(key, -eye(dim)));
constraints->push_back(JacobianFactor(terms, jacobian->getb(), jacobian->get_model())); constraints->push_back(
JacobianFactor(terms, jacobian->getb(), jacobian->get_model()));
// Add lower bound for this slack key // Add lower bound for this slack key
slackLowerBounds.insert(key, zero(dim)); slackLowerBounds.insert(key, zero(dim));
} }
@ -467,7 +505,7 @@ std::pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP
} }
/* ************************************************************************* */ /* ************************************************************************* */
std::pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const { pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
static const bool debug = false; static const bool debug = false;
// Initial values with slack variables for the LP subproblem, Nocedal06book, pg.473 // Initial values with slack variables for the LP subproblem, Nocedal06book, pg.473
VectorValues initials; VectorValues initials;
@ -486,14 +524,19 @@ std::pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
LPSolver lpSolver(objectiveLP, constraints, slackLowerBounds); LPSolver lpSolver(objectiveLP, constraints, slackLowerBounds);
VectorValues solution = lpSolver.solve(); VectorValues solution = lpSolver.solve();
if (debug) initials.print("Initials LP: "); if (debug)
if (debug) objectiveLP.print("Objective LP: "); initials.print("Initials LP: ");
if (debug) constraints->print("Constraints LP: "); if (debug)
if (debug) solution.print("LP solution: "); objectiveLP.print("Objective LP: ");
if (debug)
constraints->print("Constraints LP: ");
if (debug)
solution.print("LP solution: ");
// Remove slack variables from solution // Remove slack variables from solution
double slackSum = 0.0; double slackSum = 0.0;
for (Key key = firstSlackKey; key < firstSlackKey+constraintIndices_.size(); ++key) { for (Key key = firstSlackKey; key < firstSlackKey + constraintIndices_.size();
++key) {
slackSum += solution.at(key).cwiseAbs().sum(); slackSum += solution.at(key).cwiseAbs().sum();
solution.erase(key); solution.erase(key);
} }
@ -501,22 +544,23 @@ std::pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
// Insert zero vectors for free variables that are not in the constraints // Insert zero vectors for free variables that are not in the constraints
BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) { BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) {
if (!solution.exists(key)) { if (!solution.exists(key)) {
GaussianFactor::shared_ptr factor = graph_.at(*fullFactorIndices_[key].begin()); GaussianFactor::shared_ptr factor = graph_.at(
*fullFactorIndices_[key].begin());
size_t dim = factor->getDim(factor->find(key)); size_t dim = factor->getDim(factor->find(key));
solution.insert(key, zero(dim)); solution.insert(key, zero(dim));
} }
} }
return make_pair(slackSum<1e-5, solution); return make_pair(slackSum < 1e-5, solution);
} }
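Spelled out, the feasibility subproblem that initialValuesLP(), objectiveCoeffsLP() and constraintsLP() assemble, and that is solved above, appears to be the standard phase-1 LP of Nocedal & Wright (2006), p. 473, with one slack z_i per constrained row:

\[
\min_{x,\,z}\ \sum_i z_i
\quad\text{s.t.}\quad
a_i^{\top} x - z_i \;\{=,\le\}\; b_i \ \ \text{(keeping each row's original type)},
\qquad z \ge 0 ,
\]

so the original constraints are feasible exactly when the optimal slacks vanish, which is what the slackSum < 1e-5 test above checks.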
/* ************************************************************************* */ /* ************************************************************************* */
std::pair<VectorValues, VectorValues> QPSolver::optimize() const { pair<VectorValues, VectorValues> QPSolver::optimize() const {
bool isFeasible; bool isFeasible;
VectorValues initials; VectorValues initials;
boost::tie(isFeasible, initials) = findFeasibleInitialValues(); boost::tie(isFeasible, initials) = findFeasibleInitialValues();
if (!isFeasible) { if (!isFeasible) {
throw std::runtime_error("LP subproblem is infeasible!"); throw runtime_error("LP subproblem is infeasible!");
} }
return optimize(initials); return optimize(initials);
} }

View File

@ -10,6 +10,9 @@
#include <gtsam/linear/GaussianFactorGraph.h> #include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h> #include <gtsam/linear/VectorValues.h>
#include <vector>
#include <set>
namespace gtsam { namespace gtsam {
/** /**
@ -20,12 +23,12 @@ namespace gtsam {
* and a positive sigma denotes a normal Gaussian noise model. * and a positive sigma denotes a normal Gaussian noise model.
*/ */
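Concretely (adapting the factors built in testQPSolver.cpp at the end of this commit; the particular numbers are arbitrary), a four-row factor mixing all three cases would be tagged through its noise model like this:

// rows 1-2: "<=" inequalities, row 3: an equality, row 4: ordinary Gaussian noise with sigma 0.1
Matrix A1 = (Matrix(4, 1) << 1, -1, 0, 1);
Matrix A2 = (Matrix(4, 1) << 1, 0, -1, 0);
Vector b = (Vector(4) << 2, 0, 0, 1.5);
noiseModel::Constrained::shared_ptr mixed =
    noiseModel::Constrained::MixedSigmas((Vector(4) << -1, -1, 0.0, 0.1));
graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, mixed));  // graph is a GaussianFactorGraph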
class QPSolver { class QPSolver {
const GaussianFactorGraph& graph_; //!< the original graph, can't be modified! const GaussianFactorGraph& graph_; //!< the original graph, can't be modified!
FastVector<size_t> constraintIndices_; //!< Indices of constrained factors in the original graph FastVector<size_t> constraintIndices_; //!< Indices of constrained factors in the original graph
GaussianFactorGraph::shared_ptr freeHessians_; //!< unconstrained Hessians of constrained variables GaussianFactorGraph::shared_ptr freeHessians_; //!< unconstrained Hessians of constrained variables
VariableIndex freeHessianFactorIndex_; //!< indices of unconstrained Hessian factors of constrained variables VariableIndex freeHessianFactorIndex_; //!< indices of unconstrained Hessian factors of constrained variables
// gtsam calls it "VariableIndex", but I think FactorIndex // gtsam calls it "VariableIndex", but I think FactorIndex
// makes more sense, because it really stores factor indices. // makes more sense, because it really stores factor indices.
VariableIndex fullFactorIndices_; //!< indices of factors involving each variable. VariableIndex fullFactorIndices_; //!< indices of factors involving each variable.
// gtsam calls it "VariableIndex", but I think FactorIndex // gtsam calls it "VariableIndex", but I think FactorIndex
// makes more sense, because it really stores factor indices. // makes more sense, because it really stores factor indices.
@ -35,8 +38,9 @@ public:
QPSolver(const GaussianFactorGraph& graph); QPSolver(const GaussianFactorGraph& graph);
/// Return indices of all constrained factors /// Return indices of all constrained factors
FastVector<size_t> constraintIndices() const { return constraintIndices_; } FastVector<size_t> constraintIndices() const {
return constraintIndices_;
}
/// Return the Hessian factor graph of constrained variables /// Return the Hessian factor graph of constrained variables
GaussianFactorGraph::shared_ptr freeHessiansOfConstrainedVars() const { GaussianFactorGraph::shared_ptr freeHessiansOfConstrainedVars() const {
@ -73,37 +77,37 @@ public:
GaussianFactorGraph buildDualGraph(const GaussianFactorGraph& graph, GaussianFactorGraph buildDualGraph(const GaussianFactorGraph& graph,
const VectorValues& x0, bool useLeastSquare = false) const; const VectorValues& x0, bool useLeastSquare = false) const;
/** /**
* Find the BAD active ineq that pulls x strongest to the wrong direction of its constraint * Find the BAD active ineq that pulls x strongest to the wrong direction of its constraint
* (i.e. it is pulling towards >0, while its feasible region is <=0) * (i.e. it is pulling towards >0, while its feasible region is <=0)
* *
* For active ineq constraints (those that are enforced as eq constraints now * For active ineq constraints (those that are enforced as eq constraints now
* in the working set), we want lambda < 0. * in the working set), we want lambda < 0.
* This is because: * This is because:
* - From the Lagrangian L = f - lambda*c, we know that the constraint force is * - From the Lagrangian L = f - lambda*c, we know that the constraint force is
* (lambda * \grad c) = \grad f, because it cancels out the unconstrained * (lambda * \grad c) = \grad f, because it cancels out the unconstrained
* force (-\grad f), which is pulling x in the opposite direction * force (-\grad f), which is pulling x in the opposite direction
* of \grad f towards the unconstrained minimum point * of \grad f towards the unconstrained minimum point
* - We also know that at the constraint surface \grad c points toward + (>= 0), * - We also know that at the constraint surface \grad c points toward + (>= 0),
* while we are solving for - (<=0) constraint * while we are solving for - (<=0) constraint
* - So, we want the constraint force (lambda * \grad c) to pull x * - So, we want the constraint force (lambda * \grad c) to pull x
* towards the opposite direction of \grad c, i.e. towards the area * towards the opposite direction of \grad c, i.e. towards the area
* where the ineq constraint <=0 is satisfied. * where the ineq constraint <=0 is satisfied.
* - Hence, we want lambda < 0 * - Hence, we want lambda < 0
* *
* So active ineqs with lambda > 0 are BAD. And we want the worst one with the largest lambda. * So active ineqs with lambda > 0 are BAD. And we want the worst one with the largest lambda.
* *
*/ */
std::pair<int, int> findWorstViolatedActiveIneq(const VectorValues& lambdas) const; std::pair<int, int> findWorstViolatedActiveIneq(
const VectorValues& lambdas) const;
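In symbols, my reading of the sign argument above: with the Lagrangian used throughout this code,

\[
L(x,\lambda) = f(x) - \sum_k \lambda_k\, c_k(x),
\qquad
\nabla f(x^{*}) = \sum_k \lambda_k \nabla c_k(x^{*}) ,
\]

and since \nabla c_k points toward the infeasible side (c_k > 0) while the constraint is c_k(x) <= 0, a well-behaved multiplier of an active inequality satisfies lambda_k < 0. Any active inequality with lambda_k > 0 is therefore BAD, and this function returns the one with the largest such lambda_k.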
/** /**
* Deactivate or activate an ineq constraint in place * Deactivate or activate an ineq constraint in place
* Warning: modify in-place to avoid copy/clone * Warning: modify in-place to avoid copy/clone
* @return true if update successful * @return true if update successful
*/ */
bool updateWorkingSetInplace(GaussianFactorGraph& workingGraph, bool updateWorkingSetInplace(GaussianFactorGraph& workingGraph, int factorIx,
int factorIx, int sigmaIx, double newSigma) const; int sigmaIx, double newSigma) const;
/** /**
* Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1] * Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1]
@ -113,12 +117,13 @@ public:
* This constraint will be added to the working set and become active * This constraint will be added to the working set and become active
* in the next iteration * in the next iteration
*/ */
boost::tuple<double, int, int> computeStepSize(const GaussianFactorGraph& workingGraph, boost::tuple<double, int, int> computeStepSize(
const VectorValues& xk, const VectorValues& p) const; const GaussianFactorGraph& workingGraph, const VectorValues& xk,
const VectorValues& p) const;
/** Iterate 1 step, modify workingGraph and currentSolution *IN PLACE* !!! */ /** Iterate 1 step, modify workingGraph and currentSolution *IN PLACE* !!! */
bool iterateInPlace(GaussianFactorGraph& workingGraph, VectorValues& currentSolution, bool iterateInPlace(GaussianFactorGraph& workingGraph,
VectorValues& lambdas) const; VectorValues& currentSolution, VectorValues& lambdas) const;
/** Optimize with provided initial values /** Optimize with provided initial values
* For this version, it is the responsibility of the caller to provide * For this version, it is the responsibility of the caller to provide
@ -127,7 +132,8 @@ public:
* of optimize(). * of optimize().
* @return a pair of <primal, dual> solutions * @return a pair of <primal, dual> solutions
*/ */
std::pair<VectorValues, VectorValues> optimize(const VectorValues& initials) const; std::pair<VectorValues, VectorValues> optimize(
const VectorValues& initials) const;
/** Optimize without an initial value. /** Optimize without an initial value.
* This version of optimize will try to find a feasible initial value by solving * This version of optimize will try to find a feasible initial value by solving
@ -137,7 +143,6 @@ public:
*/ */
std::pair<VectorValues, VectorValues> optimize() const; std::pair<VectorValues, VectorValues> optimize() const;
/** /**
* Create initial values for the LP subproblem * Create initial values for the LP subproblem
* @return initial values and the key for the first slack variable * @return initial values and the key for the first slack variable
@ -148,14 +153,16 @@ public:
VectorValues objectiveCoeffsLP(Key firstSlackKey) const; VectorValues objectiveCoeffsLP(Key firstSlackKey) const;
/// Build constraints and slacks' lower bounds for the LP subproblem /// Build constraints and slacks' lower bounds for the LP subproblem
std::pair<GaussianFactorGraph::shared_ptr, VectorValues> constraintsLP(Key firstSlackKey) const; std::pair<GaussianFactorGraph::shared_ptr, VectorValues> constraintsLP(
Key firstSlackKey) const;
/// Find a feasible initial point /// Find a feasible initial point
std::pair<bool, VectorValues> findFeasibleInitialValues() const; std::pair<bool, VectorValues> findFeasibleInitialValues() const;
/// Convert a Gaussian factor to a Jacobian. Return empty shared ptr if failed /// Convert a Gaussian factor to a Jacobian. Return empty shared ptr if failed
/// TODO: Move to GaussianFactor? /// TODO: Move to GaussianFactor?
static JacobianFactor::shared_ptr toJacobian(const GaussianFactor::shared_ptr& factor) { static JacobianFactor::shared_ptr toJacobian(
const GaussianFactor::shared_ptr& factor) {
JacobianFactor::shared_ptr jacobian( JacobianFactor::shared_ptr jacobian(
boost::dynamic_pointer_cast<JacobianFactor>(factor)); boost::dynamic_pointer_cast<JacobianFactor>(factor));
return jacobian; return jacobian;
@ -163,17 +170,19 @@ public:
/// Convert a Gaussian factor to a Hessian. Return empty shared ptr if failed /// Convert a Gaussian factor to a Hessian. Return empty shared ptr if failed
/// TODO: Move to GaussianFactor? /// TODO: Move to GaussianFactor?
static HessianFactor::shared_ptr toHessian(const GaussianFactor::shared_ptr factor) { static HessianFactor::shared_ptr toHessian(
HessianFactor::shared_ptr hessian(boost::dynamic_pointer_cast<HessianFactor>(factor)); const GaussianFactor::shared_ptr factor) {
HessianFactor::shared_ptr hessian(
boost::dynamic_pointer_cast<HessianFactor>(factor));
return hessian; return hessian;
} }
private: private:
/// Collect all free Hessians involving constrained variables into a graph /// Collect all free Hessians involving constrained variables into a graph
GaussianFactorGraph::shared_ptr unconstrainedHessiansOfConstrainedVars( GaussianFactorGraph::shared_ptr unconstrainedHessiansOfConstrainedVars(
const GaussianFactorGraph& graph, const std::set<Key>& constrainedVars) const; const GaussianFactorGraph& graph,
const std::set<Key>& constrainedVars) const;
}; };
} /* namespace gtsam */ } /* namespace gtsam */

View File

@ -16,11 +16,12 @@
* @author Duy-Nguyen Ta * @author Duy-Nguyen Ta
*/ */
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/inference/Symbol.h> #include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/linear/QPSolver.h> #include <gtsam_unstable/linear/QPSolver.h>
#include <CppUnitLite/TestHarness.h>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
using namespace gtsam::symbol_shorthand; using namespace gtsam::symbol_shorthand;
@ -38,18 +39,18 @@ GaussianFactorGraph createTestCase() {
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
// Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 0 // Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 0
graph.push_back( graph.push_back(
HessianFactor(X(1), X(2), 2.0*ones(1, 1), -ones(1, 1), 3.0*ones(1), HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1),
2.0*ones(1, 1), zero(1), 10.0)); 2.0 * ones(1, 1), zero(1), 10.0));
// Inequality constraints // Inequality constraints
// Jacobian factors represent Ax-b, hence // Jacobian factors represent Ax-b, hence
// x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2
Matrix A1 = (Matrix(4, 1)<<1, -1, 0, 1); Matrix A1 = (Matrix(4, 1) << 1, -1, 0, 1);
Matrix A2 = (Matrix(4, 1)<<1, 0, -1, 0); Matrix A2 = (Matrix(4, 1) << 1, 0, -1, 0);
Vector b = (Vector(4)<<2, 0, 0, 1.5); Vector b = (Vector(4) << 2, 0, 0, 1.5);
// Special constrained noise model denoting <= inequalities with negative sigmas // Special constrained noise model denoting <= inequalities with negative sigmas
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(4)<<-1, -1, -1, -1)); noiseModel::Constrained::MixedSigmas((Vector(4) << -1, -1, -1, -1));
graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise)); graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise));
return graph; return graph;
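To unpack the factors in createTestCase(): with G11 = 2, G12 = -1, g1 = 3, G22 = 2, g2 = 0 and f = 10, the HessianFactor encodes the objective 0.5*2*x1^2 - x1*x2 + 0.5*2*x2^2 - 3*x1 + 0.5*10 = x1^2 - x1*x2 + x2^2 - 3*x1 + 5, and the four rows of the constrained JacobianFactor, read as Ax - b <= 0 because of the negative sigmas, are x1 + x2 <= 2, -x1 <= 0, -x2 <= 0, and x1 <= 1.5.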
@ -63,20 +64,22 @@ TEST(QPSolver, constraintsAux) {
LONGS_EQUAL(1, constraintIx[0]); LONGS_EQUAL(1, constraintIx[0]);
VectorValues lambdas; VectorValues lambdas;
lambdas.insert(constraintIx[0], (Vector(4)<< -0.5, 0.0, 0.3, 0.1)); lambdas.insert(constraintIx[0], (Vector(4) << -0.5, 0.0, 0.3, 0.1));
int factorIx, lambdaIx; int factorIx, lambdaIx;
boost::tie(factorIx, lambdaIx) = solver.findWorstViolatedActiveIneq(lambdas); boost::tie(factorIx, lambdaIx) = solver.findWorstViolatedActiveIneq(lambdas);
LONGS_EQUAL(1, factorIx); LONGS_EQUAL(1, factorIx);
LONGS_EQUAL(2, lambdaIx); LONGS_EQUAL(2, lambdaIx);
VectorValues lambdas2; VectorValues lambdas2;
lambdas2.insert(constraintIx[0], (Vector(4)<< -0.5, 0.0, -0.3, -0.1)); lambdas2.insert(constraintIx[0], (Vector(4) << -0.5, 0.0, -0.3, -0.1));
int factorIx2, lambdaIx2; int factorIx2, lambdaIx2;
boost::tie(factorIx2, lambdaIx2) = solver.findWorstViolatedActiveIneq(lambdas2); boost::tie(factorIx2, lambdaIx2) = solver.findWorstViolatedActiveIneq(
lambdas2);
LONGS_EQUAL(-1, factorIx2); LONGS_EQUAL(-1, factorIx2);
LONGS_EQUAL(-1, lambdaIx2); LONGS_EQUAL(-1, lambdaIx2);
GaussianFactorGraph::shared_ptr freeHessian = solver.freeHessiansOfConstrainedVars(); GaussianFactorGraph::shared_ptr freeHessian =
solver.freeHessiansOfConstrainedVars();
GaussianFactorGraph expectedFreeHessian; GaussianFactorGraph expectedFreeHessian;
expectedFreeHessian.push_back( expectedFreeHessian.push_back(
HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1), HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1),
@ -94,17 +97,17 @@ GaussianFactorGraph createEqualityConstrainedTest() {
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
// Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0 // Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0
graph.push_back( graph.push_back(
HessianFactor(X(1), X(2), 2.0*ones(1, 1), zeros(1, 1), zero(1), HessianFactor(X(1), X(2), 2.0 * ones(1, 1), zeros(1, 1), zero(1),
2.0*ones(1, 1), zero(1), 0.0)); 2.0 * ones(1, 1), zero(1), 0.0));
// Equality constraints // Equality constraints
// x1 + x2 = 1 --> x1 + x2 -1 = 0, hence we negate the b vector // x1 + x2 = 1 --> x1 + x2 -1 = 0, hence we negate the b vector
Matrix A1 = (Matrix(1, 1)<<1); Matrix A1 = (Matrix(1, 1) << 1);
Matrix A2 = (Matrix(1, 1)<<1); Matrix A2 = (Matrix(1, 1) << 1);
Vector b = -(Vector(1)<<1); Vector b = -(Vector(1) << 1);
// Special constrained noise model with zero sigmas denoting hard equality constraints // Special constrained noise model with zero sigmas denoting hard equality constraints
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(1)<<0.0)); noiseModel::Constrained::MixedSigmas((Vector(1) << 0.0));
graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise)); graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise));
return graph; return graph;
@ -123,7 +126,7 @@ TEST(QPSolver, dual) {
GaussianFactorGraph dualGraph = solver.buildDualGraph(graph, initials); GaussianFactorGraph dualGraph = solver.buildDualGraph(graph, initials);
VectorValues dual = dualGraph.optimize(); VectorValues dual = dualGraph.optimize();
VectorValues expectedDual; VectorValues expectedDual;
expectedDual.insert(1, (Vector(1)<<2.0)); expectedDual.insert(1, (Vector(1) << 2.0));
CHECK(assert_equal(expectedDual, dual, 1e-10)); CHECK(assert_equal(expectedDual, dual, 1e-10));
} }
@ -140,8 +143,8 @@ TEST(QPSolver, iterate) {
currentSolution.insert(X(2), zero(1)); currentSolution.insert(X(2), zero(1));
std::vector<VectorValues> expectedSolutions(3); std::vector<VectorValues> expectedSolutions(3);
expectedSolutions[0].insert(X(1), (Vector(1) << 4.0/3.0)); expectedSolutions[0].insert(X(1), (Vector(1) << 4.0 / 3.0));
expectedSolutions[0].insert(X(2), (Vector(1) << 2.0/3.0)); expectedSolutions[0].insert(X(2), (Vector(1) << 2.0 / 3.0));
expectedSolutions[1].insert(X(1), (Vector(1) << 1.5)); expectedSolutions[1].insert(X(1), (Vector(1) << 1.5));
expectedSolutions[1].insert(X(2), (Vector(1) << 0.5)); expectedSolutions[1].insert(X(2), (Vector(1) << 0.5));
expectedSolutions[2].insert(X(1), (Vector(1) << 1.5)); expectedSolutions[2].insert(X(1), (Vector(1) << 1.5));
@ -168,8 +171,8 @@ TEST(QPSolver, optimizeForst10book_pg171Ex5) {
VectorValues solution; VectorValues solution;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials); boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials);
VectorValues expectedSolution; VectorValues expectedSolution;
expectedSolution.insert(X(1), (Vector(1)<< 1.5)); expectedSolution.insert(X(1), (Vector(1) << 1.5));
expectedSolution.insert(X(2), (Vector(1)<< 0.5)); expectedSolution.insert(X(2), (Vector(1) << 0.5));
CHECK(assert_equal(expectedSolution, solution, 1e-100)); CHECK(assert_equal(expectedSolution, solution, 1e-100));
} }
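A quick check of this expected solution against the objective built in createTestCase(): minimizing x1^2 - x1*x2 + x2^2 - 3*x1 without constraints gives (x1, x2) = (2, 1), which violates x1 + x2 <= 2. On the active boundary x2 = 2 - x1 the objective (up to its constant term) reduces to 3*x1^2 - 9*x1 + 4, minimized at x1 = 1.5, i.e. (1.5, 0.5), which also respects x1 <= 1.5 and the sign constraints, matching expectedSolution.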
@ -183,18 +186,18 @@ GaussianFactorGraph createTestMatlabQPEx() {
// 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
// Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 1000 // Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 1000
graph.push_back( graph.push_back(
HessianFactor(X(1), X(2), 1.0*ones(1, 1), -ones(1, 1), 2.0*ones(1), HessianFactor(X(1), X(2), 1.0 * ones(1, 1), -ones(1, 1), 2.0 * ones(1),
2.0*ones(1, 1), 6*ones(1), 1000.0)); 2.0 * ones(1, 1), 6 * ones(1), 1000.0));
// Inequality constraints // Inequality constraints
// Jacobian factors represent Ax-b, hence // Jacobian factors represent Ax-b, hence
// x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2
Matrix A1 = (Matrix(5, 1)<<1, -1, 2, -1, 0); Matrix A1 = (Matrix(5, 1) << 1, -1, 2, -1, 0);
Matrix A2 = (Matrix(5, 1)<<1, 2, 1, 0, -1); Matrix A2 = (Matrix(5, 1) << 1, 2, 1, 0, -1);
Vector b = (Vector(5) <<2, 2, 3, 0, 0); Vector b = (Vector(5) << 2, 2, 3, 0, 0);
// Special constrained noise model denoting <= inequalities with negative sigmas // Special constrained noise model denoting <= inequalities with negative sigmas
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(5)<<-1, -1, -1, -1, -1)); noiseModel::Constrained::MixedSigmas((Vector(5) << -1, -1, -1, -1, -1));
graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise)); graph.push_back(JacobianFactor(X(1), A1, X(2), A2, b, noise));
return graph; return graph;
@ -209,8 +212,8 @@ TEST(QPSolver, optimizeMatlabEx) {
VectorValues solution; VectorValues solution;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials); boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials);
VectorValues expectedSolution; VectorValues expectedSolution;
expectedSolution.insert(X(1), (Vector(1)<< 2.0/3.0)); expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0));
expectedSolution.insert(X(2), (Vector(1)<< 4.0/3.0)); expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0));
CHECK(assert_equal(expectedSolution, solution, 1e-7)); CHECK(assert_equal(expectedSolution, solution, 1e-7));
} }
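For reference, this fixture appears to mirror the example in MATLAB's quadprog documentation: H = [1 -1; -1 2], f = [-2; -6], i.e. minimize 0.5*x1^2 - x1*x2 + x2^2 - 2*x1 - 6*x2 subject to x1 + x2 <= 2, -x1 + 2*x2 <= 2, 2*x1 + x2 <= 3 and x >= 0, whose documented minimizer is (2/3, 4/3), the expectedSolution checked above.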
@ -219,17 +222,23 @@ TEST(QPSolver, optimizeMatlabEx) {
GaussianFactorGraph createTestNocedal06bookEx16_4() { GaussianFactorGraph createTestNocedal06bookEx16_4() {
GaussianFactorGraph graph; GaussianFactorGraph graph;
graph.push_back(JacobianFactor(X(1), ones(1,1), ones(1))); graph.push_back(JacobianFactor(X(1), ones(1, 1), ones(1)));
graph.push_back(JacobianFactor(X(2), ones(1,1), 2.5*ones(1))); graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));
// Inequality constraints // Inequality constraints
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::MixedSigmas( noiseModel::Constrained::shared_ptr noise =
(Vector(1) << -1)); noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
graph.push_back(JacobianFactor(X(1), -ones(1,1), X(2), 2*ones(1,1), 2*ones(1), noise)); graph.push_back(
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2), 2*ones(1,1), 6*ones(1), noise)); JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), 2 * ones(1),
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2),-2*ones(1,1), 2*ones(1), noise)); noise));
graph.push_back(JacobianFactor(X(1), -ones(1,1), zero(1), noise)); graph.push_back(
graph.push_back(JacobianFactor(X(2), -ones(1,1), zero(1), noise)); JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1),
noise));
graph.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1),
noise));
graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise));
graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise));
return graph; return graph;
} }
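This reproduces Nocedal & Wright, Example 16.4: minimize (x1 - 1)^2 + (x2 - 2.5)^2 subject to x1 - 2*x2 + 2 >= 0, -x1 - 2*x2 + 6 >= 0, -x1 + 2*x2 + 2 >= 0, x1 >= 0, x2 >= 0. The two unit JacobianFactors above encode the quadratic terms ||x1 - 1||^2 and ||x2 - 2.5||^2, while each constrained JacobianFactor is the corresponding inequality flipped into the Ax - b <= 0 form denoted by negative sigmas. The known solution (1.4, 1.7), used as expectedSolution in the next test, makes the first constraint active: 1.4 - 2*1.7 + 2 = 0.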
@ -238,54 +247,60 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4) {
GaussianFactorGraph graph = createTestNocedal06bookEx16_4(); GaussianFactorGraph graph = createTestNocedal06bookEx16_4();
QPSolver solver(graph); QPSolver solver(graph);
VectorValues initials; VectorValues initials;
initials.insert(X(1), (Vector(1)<<2.0)); initials.insert(X(1), (Vector(1) << 2.0));
initials.insert(X(2), zero(1)); initials.insert(X(2), zero(1));
VectorValues solution; VectorValues solution;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials); boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials);
VectorValues expectedSolution; VectorValues expectedSolution;
expectedSolution.insert(X(1), (Vector(1)<< 1.4)); expectedSolution.insert(X(1), (Vector(1) << 1.4));
expectedSolution.insert(X(2), (Vector(1)<< 1.7)); expectedSolution.insert(X(2), (Vector(1) << 1.7));
CHECK(assert_equal(expectedSolution, solution, 1e-7)); CHECK(assert_equal(expectedSolution, solution, 1e-7));
} }
/* ************************************************************************* */ /* ************************************************************************* */
/* Create test graph as in Nocedal06book, Ex 16.4, pg. 475 /* Create test graph as in Nocedal06book, Ex 16.4, pg. 475
with the first constraint (16.49b) replaced by with the first constraint (16.49b) replaced by
x1 - 2 x2 - 1 >=0 x1 - 2 x2 - 1 >=0
so that the trivial initial point (0,0) is infeasible so that the trivial initial point (0,0) is infeasible
==== ====
H = [2 0; 0 2]; H = [2 0; 0 2];
f = [-2; -5]; f = [-2; -5];
A =[-1 2; A =[-1 2;
1 2 1 2
1 -2]; 1 -2];
b = [-1; 6; 2]; b = [-1; 6; 2];
lb = zeros(2,1); lb = zeros(2,1);
opts = optimoptions('quadprog','Algorithm','active-set','Display','off'); opts = optimoptions('quadprog','Algorithm','active-set','Display','off');
[x,fval,exitflag,output,lambda] = ... [x,fval,exitflag,output,lambda] = ...
quadprog(H,f,A,b,[],[],lb,[],[],opts); quadprog(H,f,A,b,[],[],lb,[],[],opts);
==== ====
x = x =
2.0000 2.0000
0.5000 0.5000
*/ */
GaussianFactorGraph modifyNocedal06bookEx16_4() { GaussianFactorGraph modifyNocedal06bookEx16_4() {
GaussianFactorGraph graph; GaussianFactorGraph graph;
graph.push_back(JacobianFactor(X(1), ones(1,1), ones(1))); graph.push_back(JacobianFactor(X(1), ones(1, 1), ones(1)));
graph.push_back(JacobianFactor(X(2), ones(1,1), 2.5*ones(1))); graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));
// Inequality constraints // Inequality constraints
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::MixedSigmas( noiseModel::Constrained::shared_ptr noise =
(Vector(1) << -1)); noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
graph.push_back(JacobianFactor(X(1), -ones(1,1), X(2), 2*ones(1,1), -1*ones(1), noise)); graph.push_back(
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2), 2*ones(1,1), 6*ones(1), noise)); JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), -1 * ones(1),
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2),-2*ones(1,1), 2*ones(1), noise)); noise));
graph.push_back(JacobianFactor(X(1), -ones(1,1), zero(1), noise)); graph.push_back(
graph.push_back(JacobianFactor(X(2), -ones(1,1), zero(1), noise)); JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1),
noise));
graph.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1),
noise));
graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise));
graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise));
return graph; return graph;
} }
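The only change from createTestNocedal06bookEx16_4() is the first constrained factor: with A = [-1 2] and b = -1 it now reads -x1 + 2*x2 + 1 <= 0, i.e. x1 - 2*x2 - 1 >= 0 as in the comment block above. At the origin this evaluates to -1 >= 0, so (0, 0) is infeasible and the solver must first locate a feasible starting point, which is what the next test exercises.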
@ -306,23 +321,31 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4_findInitialPoint) {
EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey+4))); EXPECT(assert_equal(zero(1), initialsLP.at(firstSlackKey+4)));
VectorValues objCoeffs = solver.objectiveCoeffsLP(firstSlackKey); VectorValues objCoeffs = solver.objectiveCoeffsLP(firstSlackKey);
for (size_t i = 0; i<5; ++i) for (size_t i = 0; i < 5; ++i)
EXPECT(assert_equal(ones(1), objCoeffs.at(firstSlackKey+i))); EXPECT(assert_equal(ones(1), objCoeffs.at(firstSlackKey+i)));
GaussianFactorGraph::shared_ptr constraints; GaussianFactorGraph::shared_ptr constraints;
VectorValues lowerBounds; VectorValues lowerBounds;
boost::tie(constraints, lowerBounds) = solver.constraintsLP(firstSlackKey); boost::tie(constraints, lowerBounds) = solver.constraintsLP(firstSlackKey);
for (size_t i = 0; i<5; ++i) for (size_t i = 0; i < 5; ++i)
EXPECT(assert_equal(zero(1), lowerBounds.at(firstSlackKey+i))); EXPECT(assert_equal(zero(1), lowerBounds.at(firstSlackKey+i)));
GaussianFactorGraph expectedConstraints; GaussianFactorGraph expectedConstraints;
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::MixedSigmas( noiseModel::Constrained::shared_ptr noise =
(Vector(1) << -1)); noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
expectedConstraints.push_back(JacobianFactor(X(1), -ones(1,1), X(2), 2*ones(1,1), X(3), -ones(1,1),-1*ones(1), noise)); expectedConstraints.push_back(
expectedConstraints.push_back(JacobianFactor(X(1), ones(1,1), X(2), 2*ones(1,1), X(4), -ones(1,1), 6*ones(1), noise)); JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), X(3), -ones(1, 1),
expectedConstraints.push_back(JacobianFactor(X(1), ones(1,1), X(2),-2*ones(1,1), X(5), -ones(1,1), 2*ones(1), noise)); -1 * ones(1), noise));
expectedConstraints.push_back(JacobianFactor(X(1), -ones(1,1), X(6), -ones(1,1), zero(1), noise)); expectedConstraints.push_back(
expectedConstraints.push_back(JacobianFactor(X(2), -ones(1,1), X(7), -ones(1,1), zero(1), noise)); JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), X(4), -ones(1, 1),
6 * ones(1), noise));
expectedConstraints.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), X(5), -ones(1, 1),
2 * ones(1), noise));
expectedConstraints.push_back(
JacobianFactor(X(1), -ones(1, 1), X(6), -ones(1, 1), zero(1), noise));
expectedConstraints.push_back(
JacobianFactor(X(2), -ones(1, 1), X(7), -ones(1, 1), zero(1), noise));
EXPECT(assert_equal(expectedConstraints, *constraints)); EXPECT(assert_equal(expectedConstraints, *constraints));
bool isFeasible; bool isFeasible;
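One reading of the expected factors in this test, as a sketch of the phase-one LP rather than a statement of the implementation: each of the five original inequality rows gains its own slack column (X(3)…X(7) here, i.e. firstSlackKey + i), the LP objective sums those slacks (all objCoeffs are ones), and the lower bounds pin them at zero. The slacks absorb any initial violation; for instance the first slacked row is -x1 + 2*x2 - x3 + 1 <= 0, so at the origin it only requires x3 >= 1. When the minimum of the slack sum reaches zero, the accompanying (x1, x2) is feasible for the original QP.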
@ -341,12 +364,12 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4_2) {
GaussianFactorGraph graph = createTestNocedal06bookEx16_4(); GaussianFactorGraph graph = createTestNocedal06bookEx16_4();
QPSolver solver(graph); QPSolver solver(graph);
VectorValues initials; VectorValues initials;
initials.insert(X(1), (Vector(1)<<0.0)); initials.insert(X(1), (Vector(1) << 0.0));
initials.insert(X(2), (Vector(1)<<100.0)); initials.insert(X(2), (Vector(1) << 100.0));
VectorValues expectedSolution; VectorValues expectedSolution;
expectedSolution.insert(X(1), (Vector(1)<< 1.4)); expectedSolution.insert(X(1), (Vector(1) << 1.4));
expectedSolution.insert(X(2), (Vector(1)<< 1.7)); expectedSolution.insert(X(2), (Vector(1) << 1.7));
VectorValues solution; VectorValues solution;
boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials); boost::tie(solution, boost::tuples::ignore) = solver.optimize(initials);
@ -363,12 +386,13 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4_2) {
TEST(QPSolver, failedSubproblem) { TEST(QPSolver, failedSubproblem) {
GaussianFactorGraph graph; GaussianFactorGraph graph;
graph.push_back(JacobianFactor(X(1), eye(2), zero(2))); graph.push_back(JacobianFactor(X(1), eye(2), zero(2)));
graph.push_back(HessianFactor(X(1), zeros(2,2), zero(2), 100.0)); graph.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0));
graph.push_back(JacobianFactor(X(1), (Matrix(1,2)<<-1.0, 0.0), -ones(1), graph.push_back(
noiseModel::Constrained::MixedSigmas(-ones(1)))); JacobianFactor(X(1), (Matrix(1, 2) << -1.0, 0.0), -ones(1),
noiseModel::Constrained::MixedSigmas(-ones(1))));
VectorValues expected; VectorValues expected;
expected.insert(X(1), (Vector(2)<< 1.0, 0.0)); expected.insert(X(1), (Vector(2) << 1.0, 0.0));
QPSolver solver(graph); QPSolver solver(graph);
VectorValues solution; VectorValues solution;