Headers and standard formatting

release/4.3a0
dellaert 2014-11-26 09:04:34 +01:00
parent b9d0373c47
commit 7aaf6a1e82
5 changed files with 365 additions and 272 deletions

View File: LPSolver.cpp

@ -5,10 +5,12 @@
* @author: Duy-Nguyen Ta
*/
#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam/inference/Symbol.h>
#include <boost/format.hpp>
#include <boost/foreach.hpp>
#include <boost/range/adaptor/map.hpp>
#include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/linear/LPSolver.h>
using namespace std;
using namespace gtsam;
@ -29,10 +31,10 @@ void LPSolver::buildMetaInformation() {
VariableIndex factorIndex(*constraints_);
BOOST_FOREACH(Key key, factorIndex | boost::adaptors::map_keys) {
if (!variableColumnNo_.count(key)) {
JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<JacobianFactor>
(constraints_->at(*factorIndex[key].begin()));
JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<
JacobianFactor>(constraints_->at(*factorIndex[key].begin()));
if (!jacobian || !jacobian->isConstrained()) {
throw std::runtime_error("Invalid constrained graph!");
throw runtime_error("Invalid constrained graph!");
}
size_t dim = jacobian->getDim(jacobian->find(key));
variableColumnNo_.insert(make_pair(key, firstVarIndex));
@ -67,7 +69,7 @@ void LPSolver::buildMetaInformation() {
void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
const JacobianFactor::shared_ptr& jacobian) const {
if (!jacobian || !jacobian->isConstrained())
throw std::runtime_error("LP only accepts constrained factors!");
throw runtime_error("LP only accepts constrained factors!");
// Build column number from keys
KeyVector keys = jacobian->keys();
@ -87,16 +89,16 @@ void LPSolver::addConstraints(const boost::shared_ptr<lprec>& lp,
vector<int> columnNoCopy(columnNo);
if (sigmas[i] > 0) {
cout << "Warning: Ignore Gaussian noise (sigma>0) in LP constraints!" << endl;
cout << "Warning: Ignore Gaussian noise (sigma>0) in LP constraints!"
<< endl;
}
int constraintType = (sigmas[i] < 0) ? LE : EQ;
if(!add_constraintex(lp.get(), columnNoCopy.size(), r.data(), columnNoCopy.data(),
constraintType, b[i]))
if (!add_constraintex(lp.get(), columnNoCopy.size(), r.data(),
columnNoCopy.data(), constraintType, b[i]))
throw runtime_error("LP can't accept Gaussian noise!");
}
}
/* ************************************************************************* */
void LPSolver::addBounds(const boost::shared_ptr<lprec>& lp) const {
// Set lower bounds
@ -132,8 +134,8 @@ boost::shared_ptr<lprec> LPSolver::buildModel() const {
// Add constraints
BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, *constraints_) {
JacobianFactor::shared_ptr jacobian =
boost::dynamic_pointer_cast<JacobianFactor>(factor);
JacobianFactor::shared_ptr jacobian = boost::dynamic_pointer_cast<
JacobianFactor>(factor);
addConstraints(lp, jacobian);
}
@ -153,7 +155,7 @@ boost::shared_ptr<lprec> LPSolver::buildModel() const {
vector<int> columnNo = buildColumnNo(keys);
if (!set_obj_fnex(lp.get(), f.size(), f.data(), columnNo.data()))
throw std::runtime_error("lpsolve cannot set obj function!");
throw runtime_error("lpsolve cannot set obj function!");
// Set the object direction to minimize
set_minim(lp.get());
@ -169,7 +171,8 @@ VectorValues LPSolver::convertToVectorValues(REAL* row) const {
VectorValues values;
BOOST_FOREACH(Key key, variableColumnNo_ | boost::adaptors::map_keys) {
// Warning: the columnNo starts from 1, but C's array index starts from 0!!
Vector v = Eigen::Map<Eigen::VectorXd>(&row[variableColumnNo_.at(key)-1], variableDims_.at(key));
Vector v = Eigen::Map<Eigen::VectorXd>(&row[variableColumnNo_.at(key) - 1],
variableDims_.at(key));
values.insert(key, v);
}
return values;
@ -183,12 +186,14 @@ VectorValues LPSolver::solve() const {
/* just out of curiosity, now show the model in lp format on screen */
/* this only works if this is a console application. If not, use write_lp and a filename */
if (debug) write_LP(lp.get(), stdout);
if (debug)
write_LP(lp.get(), stdout);
int ret = ::solve(lp.get());
if (ret != 0) {
throw std::runtime_error(
(boost::format( "lpsolve cannot find the optimal solution and terminates with %d error. "\
throw runtime_error(
(boost::format(
"lpsolve cannot find the optimal solution and terminates with %d error. "
"See lpsolve's solve() documentation for details.") % ret).str());
}
REAL* row = NULL;
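For readers unfamiliar with the lp_solve 5.5 C API that buildModel(), addConstraints() and solve() drive, here is a minimal standalone sketch of the same calling pattern on a made-up toy LP (minimize x1 + x2 subject to x1 + 2*x2 <= 4 and x1 == 1). The problem data and variable names are illustrative only; the key point is that lp_solve column numbers are 1-based, as the comment in convertToVectorValues() warns.

#include <gtsam/3rdparty/lp_solve_5.5/lp_lib.h>  // same header LPSolver.h pulls in
#include <iostream>

int main() {
  lprec* lp = make_lp(0, 2);        // 0 rows, 2 columns (decision variables)
  set_add_rowmode(lp, TRUE);        // faster row-wise construction

  int cols[2] = {1, 2};             // lp_solve columns are 1-based!

  REAL c1[2] = {1.0, 2.0};          // x1 + 2*x2 <= 4  -> LE row
  add_constraintex(lp, 2, c1, cols, LE, 4.0);
  REAL c2[1] = {1.0};               // x1 == 1         -> EQ row
  int col1[1] = {1};
  add_constraintex(lp, 1, c2, col1, EQ, 1.0);
  set_add_rowmode(lp, FALSE);

  REAL f[2] = {1.0, 1.0};           // objective: minimize x1 + x2
  set_obj_fnex(lp, 2, f, cols);
  set_minim(lp);

  if (solve(lp) != 0) {             // 0 == OPTIMAL, anything else is an error
    std::cerr << "lp_solve did not find an optimal solution" << std::endl;
    delete_lp(lp);
    return 1;
  }

  REAL* row = NULL;                 // read the optimal column values back
  get_ptr_variables(lp, &row);
  std::cout << "x1 = " << row[0] << ", x2 = " << row[1] << std::endl;

  delete_lp(lp);
  return 0;
}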

View File: LPSolver.h

@ -7,13 +7,14 @@
#pragma once
#include <boost/range/irange.hpp>
#include <gtsam/3rdparty/lp_solve_5.5/lp_lib.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/3rdparty/lp_solve_5.5/lp_lib.h>
#include <boost/range/irange.hpp>
namespace gtsam {
/**
@ -36,8 +37,10 @@ public:
* We do NOT adopt this convention here. If no lower/upper bounds are specified, the variable will be
* set as unbounded, i.e. -inf <= x <= inf.
*/
LPSolver(const VectorValues& objectiveCoeffs, const GaussianFactorGraph::shared_ptr& constraints,
const VectorValues& lowerBounds = VectorValues(), const VectorValues& upperBounds = VectorValues()) :
LPSolver(const VectorValues& objectiveCoeffs,
const GaussianFactorGraph::shared_ptr& constraints,
const VectorValues& lowerBounds = VectorValues(),
const VectorValues& upperBounds = VectorValues()) :
objectiveCoeffs_(objectiveCoeffs), constraints_(constraints), lowerBounds_(
lowerBounds), upperBounds_(upperBounds) {
buildMetaInformation();
@ -51,10 +54,18 @@ public:
void buildMetaInformation();
/// Getters for unit-test checking
const std::map<Key, size_t>& variableColumnNo() const { return variableColumnNo_; }
const std::map<Key, size_t>& variableDims() const { return variableDims_; }
size_t nrColumns() const {return nrColumns_;}
const KeySet& freeVars() const { return freeVars_; }
const std::map<Key, size_t>& variableColumnNo() const {
return variableColumnNo_;
}
const std::map<Key, size_t>& variableDims() const {
return variableDims_;
}
size_t nrColumns() const {
return nrColumns_;
}
const KeySet& freeVars() const {
return freeVars_;
}
/**
* Build lpsolve's column number for a list of keys
@ -64,7 +75,8 @@ public:
std::vector<int> columnNo;
BOOST_FOREACH(Key key, keyList) {
std::vector<int> varIndices = boost::copy_range<std::vector<int> >(
boost::irange(variableColumnNo_.at(key), variableColumnNo_.at(key) + variableDims_.at(key)));
boost::irange(variableColumnNo_.at(key),
variableColumnNo_.at(key) + variableDims_.at(key)));
columnNo.insert(columnNo.end(), varIndices.begin(), varIndices.end());
}
return columnNo;
@ -82,7 +94,6 @@ public:
*/
void addBounds(const boost::shared_ptr<lprec>& lp) const;
/**
* Main function to build lpsolve model
* TODO: consider lp as a class variable and support setConstraints

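Putting the pieces of this header together, a caller would use LPSolver roughly as follows. This is only a sketch under the conventions documented above: the objective enters as per-variable coefficient vectors, each constraint is a constrained JacobianFactor (negative sigma for an inequality row, zero sigma for an equality row, mirroring addConstraints in the .cpp), and unspecified bounds leave a variable unbounded. The keys and the toy problem itself are made up for illustration.

#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/inference/Symbol.h>

using namespace gtsam;

VectorValues solveToyLP() {
  Key x1 = Symbol('x', 1), x2 = Symbol('x', 2);

  // Objective coefficients f in min f'x: minimize -x1 - x2.
  VectorValues objective;
  objective.insert(x1, -ones(1));
  objective.insert(x2, -ones(1));

  // One inequality constraint x1 + x2 <= 4 (negative sigma marks a <= row).
  GaussianFactorGraph::shared_ptr constraints(new GaussianFactorGraph());
  noiseModel::Constrained::shared_ptr ineq =
      noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
  constraints->push_back(
      JacobianFactor(x1, ones(1, 1), x2, ones(1, 1), 4 * ones(1), ineq));

  // Lower bounds x1, x2 >= 0; variables without bounds stay unbounded.
  VectorValues lowerBounds;
  lowerBounds.insert(x1, zero(1));
  lowerBounds.insert(x2, zero(1));

  LPSolver solver(objective, constraints, lowerBounds);
  return solver.solve();
}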
View File: QPSolver.cpp

@ -5,12 +5,13 @@
* @author: thduynguyen
*/
#include <boost/foreach.hpp>
#include <boost/range/adaptor/map.hpp>
#include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/linear/QPSolver.h>
#include <gtsam_unstable/linear/LPSolver.h>
#include <boost/foreach.hpp>
#include <boost/range/adaptor/map.hpp>
using namespace std;
namespace gtsam {
@ -30,21 +31,21 @@ QPSolver::QPSolver(const GaussianFactorGraph& graph) :
}
// Collect constrained variable keys
std::set<size_t> constrainedVars;
set<size_t> constrainedVars;
BOOST_FOREACH(size_t index, constraintIndices_) {
KeyVector keys = graph.at(index)->keys();
constrainedVars.insert(keys.begin(), keys.end());
}
// Collect unconstrained hessians of constrained vars to build dual graph
freeHessians_ = unconstrainedHessiansOfConstrainedVars(graph, constrainedVars);
freeHessians_ = unconstrainedHessiansOfConstrainedVars(graph,
constrainedVars);
freeHessianFactorIndex_ = VariableIndex(*freeHessians_);
}
/* ************************************************************************* */
GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars(
const GaussianFactorGraph& graph, const std::set<Key>& constrainedVars) const {
const GaussianFactorGraph& graph, const set<Key>& constrainedVars) const {
VariableIndex variableIndex(graph);
GaussianFactorGraph::shared_ptr hfg(new GaussianFactorGraph());
// Collect all factors involving constrained vars
@ -58,7 +59,8 @@ GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars
// Convert each factor into Hessian
BOOST_FOREACH(size_t factorIndex, factors) {
if (!graph[factorIndex]) continue;
if (!graph[factorIndex])
continue;
// See if this is a Jacobian factor
JacobianFactor::shared_ptr jf = toJacobian(graph[factorIndex]);
if (jf) {
@ -69,7 +71,8 @@ GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars
Vector newPrecisions(sigmas.size());
bool mixed = false;
for (size_t s = 0; s < sigmas.size(); ++s) {
if (sigmas[s] <= 1e-9) newPrecisions[s] = 0.0; // 0 info for constraints (both ineq and eq)
if (sigmas[s] <= 1e-9)
newPrecisions[s] = 0.0; // 0 info for constraints (both ineq and eq)
else {
newPrecisions[s] = 1.0 / sigmas[s];
mixed = true;
@ -77,11 +80,11 @@ GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars
}
if (mixed) { // only add free hessians if it's mixed
JacobianFactor::shared_ptr newJacobian = toJacobian(jf->clone());
newJacobian->setModel(noiseModel::Diagonal::Precisions(newPrecisions));
newJacobian->setModel(
noiseModel::Diagonal::Precisions(newPrecisions));
hfg->push_back(HessianFactor(*newJacobian));
}
}
else { // unconstrained Jacobian
} else { // unconstrained Jacobian
// Convert the original linear factor to Hessian factor
// TODO: This may fail and throw the following exception
// Assertion failed: (((!PanelMode) && stride==0 && offset==0) ||
@ -92,8 +95,7 @@ GaussianFactorGraph::shared_ptr QPSolver::unconstrainedHessiansOfConstrainedVars
// My current way to fix this is to compile both gtsam and my library in Release mode
hfg->add(HessianFactor(*jf));
}
}
else { // If it's not a Jacobian, it should be a hessian factor. Just add!
} else { // If it's not a Jacobian, it should be a hessian factor. Just add!
hfg->push_back(graph[factorIndex]);
}
}
@ -111,17 +113,23 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
// For each variable xi involved in some constraint, compute the unconstrained gradient
// wrt xi from the prebuilt freeHessian graph
// \grad f(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi
if (debug) freeHessianFactorIndex_.print("freeHessianFactorIndex_: ");
if (debug)
freeHessianFactorIndex_.print("freeHessianFactorIndex_: ");
BOOST_FOREACH(const VariableIndex::value_type& xiKey_factors, freeHessianFactorIndex_) {
Key xiKey = xiKey_factors.first;
VariableIndex::Factors xiFactors = xiKey_factors.second;
// Find xi's dim from the first factor on xi
if (xiFactors.size() == 0) continue;
GaussianFactor::shared_ptr xiFactor0 = freeHessians_->at(*xiFactors.begin());
if (xiFactors.size() == 0)
continue;
GaussianFactor::shared_ptr xiFactor0 = freeHessians_->at(
*xiFactors.begin());
size_t xiDim = xiFactor0->getDim(xiFactor0->find(xiKey));
if (debug) xiFactor0->print("xiFactor0: ");
if (debug) cout << "xiKey: " << string(Symbol(xiKey)) << ", xiDim: " << xiDim << endl;
if (debug)
xiFactor0->print("xiFactor0: ");
if (debug)
cout << "xiKey: " << string(Symbol(xiKey)) << ", xiDim: " << xiDim
<< endl;
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// Compute the b-vector for the dual factor Ax-b
@ -139,8 +147,7 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
if (xi > xj) {
Matrix Gji = factor->info(xj, xi);
Gij = Gji.transpose();
}
else {
} else {
Gij = factor->info(xi, xj);
}
// Accumulate Gij*xj to gradf
@ -155,16 +162,18 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
// Compute the Jacobian A for the dual factor Ax-b
// Obtain the jacobians for lambda variables from their corresponding constraints
// A = gradc_k(xi) = \frac{\partial c_k}{\partial xi}'
std::vector<std::pair<Key, Matrix> > lambdaTerms; // collection of lambda_k, and gradc_k
typedef std::pair<size_t, size_t> FactorIx_SigmaIx;
std::vector<FactorIx_SigmaIx> unconstrainedIndex; // pairs of factorIx,sigmaIx of unconstrained rows
vector<pair<Key, Matrix> > lambdaTerms; // collection of lambda_k, and gradc_k
typedef pair<size_t, size_t> FactorIx_SigmaIx;
vector<FactorIx_SigmaIx> unconstrainedIndex; // pairs of factorIx,sigmaIx of unconstrained rows
BOOST_FOREACH(size_t factorIndex, fullFactorIndices_[xiKey]) {
JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIndex));
if (!factor || !factor->isConstrained()) continue;
if (!factor || !factor->isConstrained())
continue;
// Gradient is the transpose of the Jacobian: A_k = gradc_k(xi) = \frac{\partial c_k}{\partial xi}'
// Each column for each lambda_k corresponds to [the transpose of] each constrained row factor
Matrix A_k = factor->getA(factor->find(xiKey)).transpose();
if (debug) gtsam::print(A_k, "A_k = ");
if (debug)
gtsam::print(A_k, "A_k = ");
// Deal with mixed sigmas: no information if sigma != 0
Vector sigmas = factor->get_model()->sigmas();
@ -185,22 +194,27 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// Create and add factors to the dual graph
// If least square approximation is desired, use unit noise model.
if (debug) cout << "Create dual factor" << endl;
if (debug)
cout << "Create dual factor" << endl;
if (useLeastSquare) {
if (debug) cout << "use least square!" << endl;
dualGraph.push_back(JacobianFactor(lambdaTerms, gradf_xi,
if (debug)
cout << "use least square!" << endl;
dualGraph.push_back(
JacobianFactor(lambdaTerms, gradf_xi,
noiseModel::Unit::Create(gradf_xi.size())));
}
else {
} else {
// Enforce constrained noise model so lambdas are solved with QR
// and should exactly satisfy all the equations
if (debug) cout << gradf_xi << endl;
dualGraph.push_back(JacobianFactor(lambdaTerms, gradf_xi,
if (debug)
cout << gradf_xi << endl;
dualGraph.push_back(
JacobianFactor(lambdaTerms, gradf_xi,
noiseModel::Constrained::All(gradf_xi.size())));
}
// Add 0 priors on all lambdas of the unconstrained rows to make sure the graph is solvable
if (debug) cout << "Create priors" << endl;
if (debug)
cout << "Create priors" << endl;
BOOST_FOREACH(FactorIx_SigmaIx factorIx_sigmaIx, unconstrainedIndex) {
size_t factorIx = factorIx_sigmaIx.first;
JacobianFactor::shared_ptr factor = toJacobian(graph.at(factorIx));
@ -209,7 +223,8 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
size_t sigmaIx = factorIx_sigmaIx.second;
J(sigmaIx, sigmaIx) = 1.0;
// Use factorIndex as the lambda's key.
if (debug) cout << "prior for factor " << factorIx << endl;
if (debug)
cout << "prior for factor " << factorIx << endl;
dualGraph.push_back(JacobianFactor(factorIx, J, zero(dim)));
}
}
@ -218,7 +233,8 @@ GaussianFactorGraph QPSolver::buildDualGraph(const GaussianFactorGraph& graph,
}
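In equation form, each dual factor assembled above encodes the first-order (stationarity) condition already sketched in the comments; using their notation, for every constrained variable x_i:

  \grad f(x_i) = \sum_j G_{ij} x_j - g_i,
  \qquad \sum_k A_k \lambda_k = \grad f(x_i), \quad A_k = \grad c_k(x_i) = \Big(\frac{\partial c_k}{\partial x_i}\Big)^T

so the JacobianFactor pushed onto dualGraph has the A_k as its blocks on the lambda_k keys and gradf_xi as its right-hand side; the constrained (or unit) noise model only controls whether that system is solved exactly via QR or in a least-squares sense.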
/* ************************************************************************* */
std::pair<int, int> QPSolver::findWorstViolatedActiveIneq(const VectorValues& lambdas) const {
pair<int, int> QPSolver::findWorstViolatedActiveIneq(
const VectorValues& lambdas) const {
int worstFactorIx = -1, worstSigmaIx = -1;
// preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either
// inactive or a good ineq constraint, so we don't care!
@ -237,9 +253,9 @@ std::pair<int, int> QPSolver::findWorstViolatedActiveIneq(const VectorValues& la
return make_pair(worstFactorIx, worstSigmaIx);
}
/* ************************************************************************* */
bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph,
int factorIx, int sigmaIx, double newSigma) const {
/* ************************************************************************* */bool QPSolver::updateWorkingSetInplace(
GaussianFactorGraph& workingGraph, int factorIx, int sigmaIx,
double newSigma) const {
if (factorIx < 0 || sigmaIx < 0)
return false;
Vector sigmas = toJacobian(workingGraph.at(factorIx))->get_model()->sigmas();
@ -264,8 +280,9 @@ bool QPSolver::updateWorkingSetInplace(GaussianFactorGraph& workingGraph,
*
* We want the minimum of all those alphas among all inactive ineq.
*/
boost::tuple<double, int, int> QPSolver::computeStepSize(const GaussianFactorGraph& workingGraph,
const VectorValues& xk, const VectorValues& p) const {
boost::tuple<double, int, int> QPSolver::computeStepSize(
const GaussianFactorGraph& workingGraph, const VectorValues& xk,
const VectorValues& p) const {
static bool debug = false;
double minAlpha = 1.0;
@ -279,28 +296,34 @@ boost::tuple<double, int, int> QPSolver::computeStepSize(const GaussianFactorGra
if (sigmas[s] < 0) {
// Compute aj'*p
double ajTp = 0.0;
for (Factor::const_iterator xj = jacobian->begin(); xj != jacobian->end(); ++xj) {
for (Factor::const_iterator xj = jacobian->begin();
xj != jacobian->end(); ++xj) {
Vector pj = p.at(*xj);
Vector aj = jacobian->getA(xj).row(s);
ajTp += aj.dot(pj);
}
if (debug) cout << "s, ajTp, b[s]: " << s << " " << ajTp << " " << b[s] << endl;
if (debug)
cout << "s, ajTp, b[s]: " << s << " " << ajTp << " " << b[s] << endl;
// Check if aj'*p >0. Don't care if it's not.
if (ajTp<=0) continue;
if (ajTp <= 0)
continue;
// Compute aj'*xk
double ajTx = 0.0;
for (Factor::const_iterator xj = jacobian->begin(); xj != jacobian->end(); ++xj) {
for (Factor::const_iterator xj = jacobian->begin();
xj != jacobian->end(); ++xj) {
Vector xkj = xk.at(*xj);
Vector aj = jacobian->getA(xj).row(s);
ajTx += aj.dot(xkj);
}
if (debug) cout << "b[s], ajTx: " << b[s] << " " << ajTx << " " << ajTp << endl;
if (debug)
cout << "b[s], ajTx: " << b[s] << " " << ajTx << " " << ajTp << endl;
// alpha = (bj - aj'*xk) / (aj'*p)
double alpha = (b[s] - ajTx) / ajTp;
if (debug) cout << "alpha: " << alpha << endl;
if (debug)
cout << "alpha: " << alpha << endl;
// We want the minimum of all those max alphas
if (alpha < minAlpha) {
@ -314,41 +337,52 @@ boost::tuple<double, int, int> QPSolver::computeStepSize(const GaussianFactorGra
return boost::make_tuple(minAlpha, closestFactorIx, closestSigmaIx);
}
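In one line, the ratio test implemented by this loop (j ranges over the inequality rows, i.e. rows with negative sigma, of the constrained factors; rows with a_j^T p <= 0 are skipped as above):

  \alpha = \min\Big( 1, \ \min_{j:\ a_j^T p > 0} \frac{b_j - a_j^T x_k}{a_j^T p} \Big)

closestFactorIx and closestSigmaIx record the row that attains the minimum, so the caller can add it to the working set.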
/* ************************************************************************* */
bool QPSolver::iterateInPlace(GaussianFactorGraph& workingGraph, VectorValues& currentSolution, VectorValues& lambdas) const {
/* ************************************************************************* */bool QPSolver::iterateInPlace(
GaussianFactorGraph& workingGraph, VectorValues& currentSolution,
VectorValues& lambdas) const {
static bool debug = false;
if (debug) workingGraph.print("workingGraph: ");
if (debug)
workingGraph.print("workingGraph: ");
// Obtain the solution from the current working graph
VectorValues newSolution = workingGraph.optimize();
if (debug) newSolution.print("New solution:");
if (debug)
newSolution.print("New solution:");
// If we CAN'T move further
if (newSolution.equals(currentSolution, 1e-5)) {
// Compute lambda from the dual graph
if (debug) cout << "Building dual graph..." << endl;
if (debug)
cout << "Building dual graph..." << endl;
GaussianFactorGraph dualGraph = buildDualGraph(workingGraph, newSolution);
if (debug) dualGraph.print("Dual graph: ");
if (debug)
dualGraph.print("Dual graph: ");
lambdas = dualGraph.optimize();
if (debug) lambdas.print("lambdas :");
if (debug)
lambdas.print("lambdas :");
int factorIx, sigmaIx;
boost::tie(factorIx, sigmaIx) = findWorstViolatedActiveIneq(lambdas);
if (debug) cout << "violated active ineq - factorIx, sigmaIx: " << factorIx << " " << sigmaIx << endl;
if (debug)
cout << "violated active ineq - factorIx, sigmaIx: " << factorIx << " "
<< sigmaIx << endl;
// Try to deactivate the weakest violated ineq constraints
// if not successful, i.e. all ineq constraints are satisfied: We have the solution!!
if (!updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, -1.0))
return true;
}
else {
} else {
// If we CAN make some progress
// Adapt stepsize if some inactive inequality constraints complain about this move
if (debug) cout << "Computing stepsize..." << endl;
if (debug)
cout << "Computing stepsize..." << endl;
double alpha;
int factorIx, sigmaIx;
VectorValues p = newSolution - currentSolution;
boost::tie(alpha, factorIx, sigmaIx) = computeStepSize(workingGraph, currentSolution, p);
if (debug) cout << "alpha, factorIx, sigmaIx: " << alpha << " " << factorIx << " " << sigmaIx << endl;
boost::tie(alpha, factorIx, sigmaIx) = computeStepSize(workingGraph,
currentSolution, p);
if (debug)
cout << "alpha, factorIx, sigmaIx: " << alpha << " " << factorIx << " "
<< sigmaIx << endl;
// also add to the working set the one that complains the most
updateWorkingSetInplace(workingGraph, factorIx, sigmaIx, 0.0);
// step!
@ -367,7 +401,8 @@ bool QPSolver::iterateInPlace(GaussianFactorGraph& workingGraph, VectorValues& c
}
/* ************************************************************************* */
std::pair<VectorValues, VectorValues> QPSolver::optimize(const VectorValues& initials) const {
pair<VectorValues, VectorValues> QPSolver::optimize(
const VectorValues& initials) const {
GaussianFactorGraph workingGraph = graph_.clone();
VectorValues currentSolution = initials;
VectorValues lambdas;
@ -379,10 +414,11 @@ std::pair<VectorValues, VectorValues> QPSolver::optimize(const VectorValues& ini
}
/* ************************************************************************* */
std::pair<VectorValues, Key> QPSolver::initialValuesLP() const {
pair<VectorValues, Key> QPSolver::initialValuesLP() const {
size_t firstSlackKey = 0;
BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) {
if (firstSlackKey < key) firstSlackKey = key;
if (firstSlackKey < key)
firstSlackKey = key;
}
firstSlackKey += 1;
@ -408,7 +444,7 @@ std::pair<VectorValues, Key> QPSolver::initialValuesLP() const {
Vector sigmas = jacobian->get_model()->sigmas();
for (size_t i = 0; i < sigmas.size(); ++i) {
if (sigmas[i] < 0) {
slackInit[i] = std::max(errorAtZero[i], 0.0);
slackInit[i] = max(errorAtZero[i], 0.0);
} else if (sigmas[i] == 0.0) {
errorAtZero[i] = fabs(errorAtZero[i]);
} // if it has >0 sigma, i.e. normal Gaussian noise, initialize it at 0
@ -439,18 +475,19 @@ VectorValues QPSolver::objectiveCoeffsLP(Key firstSlackKey) const {
}
/* ************************************************************************* */
std::pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP(
pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP(
Key firstSlackKey) const {
// Create constraints and 0 lower bounds (zi>=0)
GaussianFactorGraph::shared_ptr constraints(new GaussianFactorGraph());
VectorValues slackLowerBounds;
for (size_t key = firstSlackKey; key<firstSlackKey + constraintIndices_.size(); ++key) {
for (size_t key = firstSlackKey;
key < firstSlackKey + constraintIndices_.size(); ++key) {
size_t iFactor = constraintIndices_[key - firstSlackKey];
JacobianFactor::shared_ptr jacobian = toJacobian(graph_.at(iFactor));
// Collect old terms to form a new factor
// TODO: it might be faster if we can get the whole block matrix at once
// but I don't know how to extend the current VerticalBlockMatrix
std::vector<std::pair<Key, Matrix> > terms;
vector<pair<Key, Matrix> > terms;
for (Factor::iterator it = jacobian->begin(); it != jacobian->end(); ++it) {
terms.push_back(make_pair(*it, jacobian->getA(it)));
}
@ -459,7 +496,8 @@ std::pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP
// LE constraints ax <= b for sigma < 0.
size_t dim = jacobian->rows();
terms.push_back(make_pair(key, -eye(dim)));
constraints->push_back(JacobianFactor(terms, jacobian->getb(), jacobian->get_model()));
constraints->push_back(
JacobianFactor(terms, jacobian->getb(), jacobian->get_model()));
// Add lower bound for this slack key
slackLowerBounds.insert(key, zero(dim));
}
@ -467,7 +505,7 @@ std::pair<GaussianFactorGraph::shared_ptr, VectorValues> QPSolver::constraintsLP
}
/* ************************************************************************* */
std::pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
static const bool debug = false;
// Initial values with slack variables for the LP subproblem, Nocedal06book, pg.473
VectorValues initials;
@ -486,14 +524,19 @@ std::pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
LPSolver lpSolver(objectiveLP, constraints, slackLowerBounds);
VectorValues solution = lpSolver.solve();
if (debug) initials.print("Initials LP: ");
if (debug) objectiveLP.print("Objective LP: ");
if (debug) constraints->print("Constraints LP: ");
if (debug) solution.print("LP solution: ");
if (debug)
initials.print("Initials LP: ");
if (debug)
objectiveLP.print("Objective LP: ");
if (debug)
constraints->print("Constraints LP: ");
if (debug)
solution.print("LP solution: ");
// Remove slack variables from solution
double slackSum = 0.0;
for (Key key = firstSlackKey; key < firstSlackKey+constraintIndices_.size(); ++key) {
for (Key key = firstSlackKey; key < firstSlackKey + constraintIndices_.size();
++key) {
slackSum += solution.at(key).cwiseAbs().sum();
solution.erase(key);
}
@ -501,7 +544,8 @@ std::pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
// Insert zero vectors for free variables that are not in the constraints
BOOST_FOREACH(Key key, fullFactorIndices_ | boost::adaptors::map_keys) {
if (!solution.exists(key)) {
GaussianFactor::shared_ptr factor = graph_.at(*fullFactorIndices_[key].begin());
GaussianFactor::shared_ptr factor = graph_.at(
*fullFactorIndices_[key].begin());
size_t dim = factor->getDim(factor->find(key));
solution.insert(key, zero(dim));
}
@ -511,12 +555,12 @@ std::pair<bool, VectorValues> QPSolver::findFeasibleInitialValues() const {
}
/* ************************************************************************* */
std::pair<VectorValues, VectorValues> QPSolver::optimize() const {
pair<VectorValues, VectorValues> QPSolver::optimize() const {
bool isFeasible;
VectorValues initials;
boost::tie(isFeasible, initials) = findFeasibleInitialValues();
if (!isFeasible) {
throw std::runtime_error("LP subproblem is infeasible!");
throw runtime_error("LP subproblem is infeasible!");
}
return optimize(initials);
}
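The last three helpers set up the standard phase-I feasibility LP from Nocedal & Wright (the pg. 473 reference in initialValuesLP). Roughly, with z_i the slack vector attached to the i-th constrained factor through the extra key and the -I block added in constraintsLP:

  \min_{x,\,z} \ \sum_i \mathbf{1}^T z_i
  \quad \text{s.t.} \quad A_i x - z_i \le b_i \ (\text{rows with } \sigma < 0), \qquad z_i \ge 0,

with equality rows (sigma = 0) carrying the same slack column through their unchanged noise model. A zero optimal slack sum means the original constraints are feasible, which is what findFeasibleInitialValues checks by summing |z| over the slack keys before erasing them from the LP solution.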

View File: QPSolver.h

@ -10,6 +10,9 @@
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>
#include <vector>
#include <set>
namespace gtsam {
/**
@ -35,8 +38,9 @@ public:
QPSolver(const GaussianFactorGraph& graph);
/// Return indices of all constrained factors
FastVector<size_t> constraintIndices() const { return constraintIndices_; }
FastVector<size_t> constraintIndices() const {
return constraintIndices_;
}
/// Return the Hessian factor graph of constrained variables
GaussianFactorGraph::shared_ptr freeHessiansOfConstrainedVars() const {
@ -73,7 +77,6 @@ public:
GaussianFactorGraph buildDualGraph(const GaussianFactorGraph& graph,
const VectorValues& x0, bool useLeastSquare = false) const;
/**
* Find the BAD active ineq that pulls x strongest to the wrong direction of its constraint
* (i.e. it is pulling towards >0, while its feasible region is <=0)
@ -95,15 +98,16 @@ public:
* So active ineqs with lambda > 0 are BAD. And we want the worst one with the largest lambda.
*
*/
std::pair<int, int> findWorstViolatedActiveIneq(const VectorValues& lambdas) const;
std::pair<int, int> findWorstViolatedActiveIneq(
const VectorValues& lambdas) const;
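Concretely, with lambdas obtained from the dual graph, the implementation in QPSolver.cpp returns (a sketch in the notation of the comment above)

  (\text{factorIx}, \text{sigmaIx}) = \arg\max_{k,\,j} \ \{\, \lambda_{k,j} \ : \ \lambda_{k,j} > 0 \,\},

or (-1, -1) when every active inequality has lambda <= 0, i.e. nothing needs to be released from the working set and the current iterate is optimal.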
/**
* Deactivate or activate an ineq constraint in place
* Warning: modify in-place to avoid copy/clone
* @return true if update successful
*/
bool updateWorkingSetInplace(GaussianFactorGraph& workingGraph,
int factorIx, int sigmaIx, double newSigma) const;
bool updateWorkingSetInplace(GaussianFactorGraph& workingGraph, int factorIx,
int sigmaIx, double newSigma) const;
/**
* Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1]
@ -113,12 +117,13 @@ public:
* This constraint will be added to the working set and become active
* in the next iteration
*/
boost::tuple<double, int, int> computeStepSize(const GaussianFactorGraph& workingGraph,
const VectorValues& xk, const VectorValues& p) const;
boost::tuple<double, int, int> computeStepSize(
const GaussianFactorGraph& workingGraph, const VectorValues& xk,
const VectorValues& p) const;
/** Iterate 1 step, modify workingGraph and currentSolution *IN PLACE* !!! */
bool iterateInPlace(GaussianFactorGraph& workingGraph, VectorValues& currentSolution,
VectorValues& lambdas) const;
bool iterateInPlace(GaussianFactorGraph& workingGraph,
VectorValues& currentSolution, VectorValues& lambdas) const;
/** Optimize with a provided initial values
* For this version, it is the responsibility of the caller to provide
@ -127,7 +132,8 @@ public:
* of optimize().
* @return a pair of <primal, dual> solutions
*/
std::pair<VectorValues, VectorValues> optimize(const VectorValues& initials) const;
std::pair<VectorValues, VectorValues> optimize(
const VectorValues& initials) const;
/** Optimize without an initial value.
* This version of optimize will try to find a feasible initial value by solving
@ -137,7 +143,6 @@ public:
*/
std::pair<VectorValues, VectorValues> optimize() const;
/**
* Create initial values for the LP subproblem
* @return initial values and the key for the first slack variable
@ -148,14 +153,16 @@ public:
VectorValues objectiveCoeffsLP(Key firstSlackKey) const;
/// Build constraints and slacks' lower bounds for the LP subproblem
std::pair<GaussianFactorGraph::shared_ptr, VectorValues> constraintsLP(Key firstSlackKey) const;
std::pair<GaussianFactorGraph::shared_ptr, VectorValues> constraintsLP(
Key firstSlackKey) const;
/// Find a feasible initial point
std::pair<bool, VectorValues> findFeasibleInitialValues() const;
/// Convert a Gaussian factor to a Jacobian. Return empty shared ptr if failed
/// TODO: Move to GaussianFactor?
static JacobianFactor::shared_ptr toJacobian(const GaussianFactor::shared_ptr& factor) {
static JacobianFactor::shared_ptr toJacobian(
const GaussianFactor::shared_ptr& factor) {
JacobianFactor::shared_ptr jacobian(
boost::dynamic_pointer_cast<JacobianFactor>(factor));
return jacobian;
@ -163,17 +170,19 @@ public:
/// Convert a Gaussian factor to a Hessian. Return empty shared ptr if failed
/// TODO: Move to GaussianFactor?
static HessianFactor::shared_ptr toHessian(const GaussianFactor::shared_ptr factor) {
HessianFactor::shared_ptr hessian(boost::dynamic_pointer_cast<HessianFactor>(factor));
static HessianFactor::shared_ptr toHessian(
const GaussianFactor::shared_ptr factor) {
HessianFactor::shared_ptr hessian(
boost::dynamic_pointer_cast<HessianFactor>(factor));
return hessian;
}
private:
/// Collect all free Hessians involving constrained variables into a graph
GaussianFactorGraph::shared_ptr unconstrainedHessiansOfConstrainedVars(
const GaussianFactorGraph& graph, const std::set<Key>& constrainedVars) const;
const GaussianFactorGraph& graph,
const std::set<Key>& constrainedVars) const;
};
} /* namespace gtsam */
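As a usage sketch of this interface (the factors below are illustrative; testQPSolver.cpp below exercises the same pattern on the Nocedal examples): the quadratic objective enters as ordinary Gaussian factors, and each constraint is a JacobianFactor whose constrained noise model uses a negative sigma per <= row (zero for an equality row). optimize() without arguments first solves the LP subproblem to find a feasible start.

#include <gtsam_unstable/linear/QPSolver.h>
#include <gtsam/inference/Symbol.h>

using namespace gtsam;
using namespace gtsam::symbol_shorthand;

std::pair<VectorValues, VectorValues> solveToyQP() {
  GaussianFactorGraph graph;

  // Objective ||x1 - 1||^2 + ||x2 - 2.5||^2 as unconstrained factors.
  graph.push_back(JacobianFactor(X(1), ones(1, 1), ones(1)));
  graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));

  // Inequality -x1 + 2*x2 <= 2: the negative sigma marks a <= row.
  noiseModel::Constrained::shared_ptr noise =
      noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
  graph.push_back(JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1),
      2 * ones(1), noise));

  QPSolver solver(graph);
  return solver.optimize();  // <primal, dual> pair
}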

View File: testQPSolver.cpp

@ -16,11 +16,12 @@
* @author Duy-Nguyen Ta
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/linear/QPSolver.h>
#include <CppUnitLite/TestHarness.h>
using namespace std;
using namespace gtsam;
using namespace gtsam::symbol_shorthand;
@ -72,11 +73,13 @@ TEST(QPSolver, constraintsAux) {
VectorValues lambdas2;
lambdas2.insert(constraintIx[0], (Vector(4) << -0.5, 0.0, -0.3, -0.1));
int factorIx2, lambdaIx2;
boost::tie(factorIx2, lambdaIx2) = solver.findWorstViolatedActiveIneq(lambdas2);
boost::tie(factorIx2, lambdaIx2) = solver.findWorstViolatedActiveIneq(
lambdas2);
LONGS_EQUAL(-1, factorIx2);
LONGS_EQUAL(-1, lambdaIx2);
GaussianFactorGraph::shared_ptr freeHessian = solver.freeHessiansOfConstrainedVars();
GaussianFactorGraph::shared_ptr freeHessian =
solver.freeHessiansOfConstrainedVars();
GaussianFactorGraph expectedFreeHessian;
expectedFreeHessian.push_back(
HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1),
@ -223,11 +226,17 @@ GaussianFactorGraph createTestNocedal06bookEx16_4() {
graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));
// Inequality constraints
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::MixedSigmas(
(Vector(1) << -1));
graph.push_back(JacobianFactor(X(1), -ones(1,1), X(2), 2*ones(1,1), 2*ones(1), noise));
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2), 2*ones(1,1), 6*ones(1), noise));
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2),-2*ones(1,1), 2*ones(1), noise));
noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
graph.push_back(
JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), 2 * ones(1),
noise));
graph.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1),
noise));
graph.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1),
noise));
graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise));
graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise));
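For reference, the constrained rows above (each negative-sigma row read as a <= constraint, per the convention used throughout) are the inequality constraints of Nocedal & Wright, Example 16.4; together with the quadratic objective of that example the full problem is

  \min_x \ (x_1 - 1)^2 + (x_2 - 2.5)^2
  \quad \text{s.t.} \quad -x_1 + 2x_2 \le 2, \quad x_1 + 2x_2 \le 6, \quad x_1 - 2x_2 \le 2, \quad x_1 \ge 0, \ x_2 \ge 0.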
@ -279,11 +288,17 @@ GaussianFactorGraph modifyNocedal06bookEx16_4() {
graph.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));
// Inequality constraints
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::MixedSigmas(
(Vector(1) << -1));
graph.push_back(JacobianFactor(X(1), -ones(1,1), X(2), 2*ones(1,1), -1*ones(1), noise));
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2), 2*ones(1,1), 6*ones(1), noise));
graph.push_back(JacobianFactor(X(1), ones(1,1), X(2),-2*ones(1,1), 2*ones(1), noise));
noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
graph.push_back(
JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), -1 * ones(1),
noise));
graph.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), 6 * ones(1),
noise));
graph.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), 2 * ones(1),
noise));
graph.push_back(JacobianFactor(X(1), -ones(1, 1), zero(1), noise));
graph.push_back(JacobianFactor(X(2), -ones(1, 1), zero(1), noise));
@ -316,13 +331,21 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4_findInitialPoint) {
EXPECT(assert_equal(zero(1), lowerBounds.at(firstSlackKey+i)));
GaussianFactorGraph expectedConstraints;
noiseModel::Constrained::shared_ptr noise = noiseModel::Constrained::MixedSigmas(
(Vector(1) << -1));
expectedConstraints.push_back(JacobianFactor(X(1), -ones(1,1), X(2), 2*ones(1,1), X(3), -ones(1,1),-1*ones(1), noise));
expectedConstraints.push_back(JacobianFactor(X(1), ones(1,1), X(2), 2*ones(1,1), X(4), -ones(1,1), 6*ones(1), noise));
expectedConstraints.push_back(JacobianFactor(X(1), ones(1,1), X(2),-2*ones(1,1), X(5), -ones(1,1), 2*ones(1), noise));
expectedConstraints.push_back(JacobianFactor(X(1), -ones(1,1), X(6), -ones(1,1), zero(1), noise));
expectedConstraints.push_back(JacobianFactor(X(2), -ones(1,1), X(7), -ones(1,1), zero(1), noise));
noiseModel::Constrained::shared_ptr noise =
noiseModel::Constrained::MixedSigmas((Vector(1) << -1));
expectedConstraints.push_back(
JacobianFactor(X(1), -ones(1, 1), X(2), 2 * ones(1, 1), X(3), -ones(1, 1),
-1 * ones(1), noise));
expectedConstraints.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), 2 * ones(1, 1), X(4), -ones(1, 1),
6 * ones(1), noise));
expectedConstraints.push_back(
JacobianFactor(X(1), ones(1, 1), X(2), -2 * ones(1, 1), X(5), -ones(1, 1),
2 * ones(1), noise));
expectedConstraints.push_back(
JacobianFactor(X(1), -ones(1, 1), X(6), -ones(1, 1), zero(1), noise));
expectedConstraints.push_back(
JacobianFactor(X(2), -ones(1, 1), X(7), -ones(1, 1), zero(1), noise));
EXPECT(assert_equal(expectedConstraints, *constraints));
bool isFeasible;
@ -364,7 +387,8 @@ TEST(QPSolver, failedSubproblem) {
GaussianFactorGraph graph;
graph.push_back(JacobianFactor(X(1), eye(2), zero(2)));
graph.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0));
graph.push_back(JacobianFactor(X(1), (Matrix(1,2)<<-1.0, 0.0), -ones(1),
graph.push_back(
JacobianFactor(X(1), (Matrix(1, 2) << -1.0, 0.0), -ones(1),
noiseModel::Constrained::MixedSigmas(-ones(1))));
VectorValues expected;