Merge remote-tracking branch 'origin/feature/LPSolver' into feature/LPSolver

# Conflicts:
#	gtsam_unstable/linear/ActiveSetSolver.cpp
#	gtsam_unstable/linear/ActiveSetSolver.h
#	gtsam_unstable/linear/LPInitSolver.h
#	gtsam_unstable/linear/LPSolver.cpp
#	gtsam_unstable/linear/LPSolver.h
#	gtsam_unstable/linear/QPSolver.cpp
#	gtsam_unstable/linear/QPSolver.h
release/4.3a0
2016-06-17 08:32:55 -04:00
commit 8c922b56c3
18 changed files with 831 additions and 928 deletions


@ -0,0 +1,290 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file ActiveSetSolver-inl.h
* @brief Implementation of ActiveSetSolver.
* @author Ivan Dario Jimenez
* @author Duy Nguyen Ta
* @date 2/11/16
*/
#include <gtsam_unstable/linear/InfeasibleInitialValues.h>
/******************************************************************************/
// Convenient macros to reduce syntactic noise. undef later.
#define Template template <class PROBLEM, class POLICY, class INITSOLVER>
#define This ActiveSetSolver<PROBLEM, POLICY, INITSOLVER>
/******************************************************************************/
namespace gtsam {
/* We have to make sure the new solution with alpha satisfies all INACTIVE inequality constraints
* If some inactive inequality constraints complain about the full step (alpha = 1),
* we have to adjust alpha to stay within the inequality constraints' feasible regions.
*
* For each inactive inequality j:
* - We already have: aj'*xk - bj <= 0, since xk satisfies all inequality constraints
* - We want: aj'*(xk + alpha*p) - bj <= 0
* - If aj'*p <= 0, we have: aj'*(xk + alpha*p) <= aj'*xk <= bj, for all alpha>0
* it's good!
* - We only care when aj'*p > 0. In this case, we need to choose alpha so that
* aj'*xk + alpha*aj'*p - bj <= 0 --> alpha <= (bj - aj'*xk) / (aj'*p)
* We want to step as far as possible, so we should choose alpha = (bj - aj'*xk) / (aj'*p)
*
* We want the minimum of all those alphas among all inactive inequalities.
*/
Template boost::tuple<double, int> This::computeStepSize(
const InequalityFactorGraph& workingSet, const VectorValues& xk,
const VectorValues& p, const double& maxAlpha) const {
double minAlpha = maxAlpha;
int closestFactorIx = -1;
for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) {
const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
double b = factor->getb()[0];
// only check inactive factors
if (!factor->active()) {
// Compute a'*p
double aTp = factor->dotProductRow(p);
// Check if a'*p >0. Don't care if it's not.
if (aTp <= 0)
continue;
// Compute a'*xk
double aTx = factor->dotProductRow(xk);
// alpha = (b - a'*xk) / (a'*p)
double alpha = (b - aTx) / aTp;
// We want the minimum of all those max alphas
if (alpha < minAlpha) {
closestFactorIx = factorIx;
minAlpha = alpha;
}
}
}
return boost::make_tuple(minAlpha, closestFactorIx);
}
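/* Illustration only -- a hypothetical, self-contained sketch (not GTSAM API) of
* the clamping rule implemented above, for one scalar inequality a'*x <= b with
* plain doubles: it limits the step only when the direction p increases a'*x. */
inline double exampleClampAlpha(double aTp, double aTx, double b,
                                double maxAlpha) {
  if (aTp <= 0) return maxAlpha;    // moving away from the boundary: no clamping
  double alpha = (b - aTx) / aTp;   // step that lands exactly on the boundary
  return alpha < maxAlpha ? alpha : maxAlpha;
}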
/******************************************************************************/
/*
* The goal of this function is to find currently active inequality constraints
* that violate the condition to be active. The one that violates the condition
* the most will be removed from the active set. See Nocedal06book, pg 469-471
*
* Find the BAD active inequality that pulls x strongest to the wrong direction
* of its constraint (i.e. it is pulling towards >0, while its feasible region is <=0)
*
* For active inequality constraints (those that are enforced as equality constraints
* in the current working set), we want lambda < 0.
* This is because:
* - From the Lagrangian L = f - lambda*c, we know that the constraint force
* is (lambda * \grad c) = \grad f. Intuitively, to keep the solution x on
* the constraint surface, the constraint force has to balance out with
* other unconstrained forces that are pulling x towards the unconstrained
* minimum point. The other unconstrained forces are pulling x toward (-\grad f),
* hence the constraint force has to be exactly \grad f, so that the total
* force is 0.
* - We also know that at the constraint surface c(x)=0, \grad c points towards + (>= 0),
* while we are solving for - (<=0) constraint.
* - We want the constraint force (lambda * \grad c) to pull x towards the - (<=0) direction
* i.e., the opposite direction of \grad c where the inequality constraint <=0 is satisfied.
* That means we want lambda < 0.
* - This is because when the constraint force pulls x towards the infeasible region (+),
* the unconstrained force is pulling x towards the opposite direction into
* the feasible region (again because the total force has to be 0 to make x stay still)
* So we can drop this constraint to have a lower error but feasible solution.
*
* In short, active inequality constraints with lambda > 0 are BAD, because they
* violate the condition to be active.
*
* And we want to remove the worst one with the largest lambda from the active set.
*
*/
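/* For example, if the multipliers of the active constraints come out as
* {-0.5, 0.0, +2.0}, only the constraint with +2.0 violates the sign condition
* and is chosen to leave the working set; if every multiplier is <= 0, the
* function below returns -1 and the current iterate is accepted as optimal. */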
Template int This::identifyLeavingConstraint(
const InequalityFactorGraph& workingSet,
const VectorValues& lambdas) const {
int worstFactorIx = -1;
// preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either
// inactive or a good inequality constraint, so we don't care!
double maxLambda = 0.0;
for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) {
const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
if (factor->active()) {
double lambda = lambdas.at(factor->dualKey())[0];
if (lambda > maxLambda) {
worstFactorIx = factorIx;
maxLambda = lambda;
}
}
}
return worstFactorIx;
}
//******************************************************************************
Template JacobianFactor::shared_ptr This::createDualFactor(
Key key, const InequalityFactorGraph& workingSet,
const VectorValues& delta) const {
// Transpose the A matrix of constrained factors to have the jacobian of the
// dual key
TermsContainer Aterms = collectDualJacobians<LinearEquality>(
key, problem_.equalities, equalityVariableIndex_);
TermsContainer AtermsInequalities = collectDualJacobians<LinearInequality>(
key, workingSet, inequalityVariableIndex_);
Aterms.insert(Aterms.end(), AtermsInequalities.begin(),
AtermsInequalities.end());
// Collect the gradients of unconstrained cost factors to the b vector
if (Aterms.size() > 0) {
Vector b = problem_.costGradient(key, delta);
// to compute the least-square approximation of dual variables
return boost::make_shared<JacobianFactor>(Aterms, b);
} else {
return boost::make_shared<JacobianFactor>();
}
}
/******************************************************************************/
/* This function will create a dual graph that solves for the
* Lagrange multipliers for the current working set.
* You can use Lagrange multipliers as a necessary condition for optimality.
* The factor graph being solved is f' = -lambda * g',
* where f is the optimized function and g is the function resulting from
* aggregating the working set.
* The lambdas tell you the status of each constraint:
* if lambda < 0 the constraint is OK,
* if lambda = 0 you are exactly on the constraint, and
* if lambda > 0 the constraint violates the condition to remain active.
*/
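/* Concretely (see createDualFactor above): for each constrained variable, the
* dual factor stacks the transposed constraint Jacobians as its A blocks and
* uses the cost gradient w.r.t. that variable as its right-hand side b, so
* optimizing the dual graph solves the stationarity condition
*   sum_i A_i^T * lambda_i = grad f    (per constrained variable)
* for the multipliers in a least-squares sense, up to the sign convention
* chosen for lambda. */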
Template GaussianFactorGraph::shared_ptr This::buildDualGraph(
const InequalityFactorGraph& workingSet, const VectorValues& delta) const {
GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph());
for (Key key : constrainedKeys_) {
// Each constrained key becomes a factor in the dual graph
JacobianFactor::shared_ptr dualFactor =
createDualFactor(key, workingSet, delta);
if (!dualFactor->empty()) dualGraph->push_back(dualFactor);
}
return dualGraph;
}
//******************************************************************************
Template GaussianFactorGraph
This::buildWorkingGraph(const InequalityFactorGraph& workingSet,
const VectorValues& xk) const {
GaussianFactorGraph workingGraph;
workingGraph.push_back(POLICY::buildCostFunction(problem_, xk));
workingGraph.push_back(problem_.equalities);
for (const LinearInequality::shared_ptr& factor : workingSet)
if (factor->active()) workingGraph.push_back(factor);
return workingGraph;
}
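/* Note on the POLICY::buildCostFunction call above: for QP it simply returns
* the quadratic cost of the problem (see QPPolicy in QPSolver.h), while for LP
* it builds the surrogate least-squares cost ||x - (xk - g)||^2 around the
* current iterate (see LPPolicy in LPSolver.h), so optimizing the working graph
* projects the negative cost gradient onto the working set's subspace. */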
//******************************************************************************
Template typename This::State This::iterate(
const typename This::State& state) const {
// Algorithm 16.3 from Nocedal06book.
// Solve with the current working set eqn 16.39, but instead of solving for p
// solve for x
GaussianFactorGraph workingGraph =
buildWorkingGraph(state.workingSet, state.values);
VectorValues newValues = workingGraph.optimize();
// If we CAN'T move further
// if p_k = 0 is the original condition, modified by Duy to say that the state
// update is zero.
if (newValues.equals(state.values, 1e-7)) {
// Compute lambda from the dual graph
GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet,
newValues);
VectorValues duals = dualGraph->optimize();
int leavingFactor = identifyLeavingConstraint(state.workingSet, duals);
// If all inequality constraints are satisfied: We have the solution!!
if (leavingFactor < 0) {
return State(newValues, duals, state.workingSet, true,
state.iterations + 1);
} else {
// Inactivate the leaving constraint
InequalityFactorGraph newWorkingSet = state.workingSet;
newWorkingSet.at(leavingFactor)->inactivate();
return State(newValues, duals, newWorkingSet, false,
state.iterations + 1);
}
} else {
// If we CAN make some progress, i.e. p_k != 0
// Adapt stepsize if some inactive constraints complain about this move
double alpha;
int factorIx;
VectorValues p = newValues - state.values;
boost::tie(alpha, factorIx) = // using 16.41
computeStepSize(state.workingSet, state.values, p, POLICY::maxAlpha);
// also add to the working set the one that complains the most
InequalityFactorGraph newWorkingSet = state.workingSet;
if (factorIx >= 0)
newWorkingSet.at(factorIx)->activate();
// step!
newValues = state.values + alpha * p;
return State(newValues, state.duals, newWorkingSet, false,
state.iterations + 1);
}
}
//******************************************************************************
Template InequalityFactorGraph This::identifyActiveConstraints(
const InequalityFactorGraph& inequalities,
const VectorValues& initialValues, const VectorValues& duals,
bool useWarmStart) const {
InequalityFactorGraph workingSet;
for (const LinearInequality::shared_ptr& factor : inequalities) {
LinearInequality::shared_ptr workingFactor(new LinearInequality(*factor));
if (useWarmStart && duals.size() > 0) {
if (duals.exists(workingFactor->dualKey())) workingFactor->activate();
else workingFactor->inactivate();
} else {
double error = workingFactor->error(initialValues);
// Safety guard. This should not happen unless users provide a bad init
if (error > 0) throw InfeasibleInitialValues();
if (fabs(error) < 1e-7)
workingFactor->activate();
else
workingFactor->inactivate();
}
workingSet.push_back(workingFactor);
}
return workingSet;
}
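/* Worked example, using the simpleLP1 fixture from the unit tests at the end
* of this commit: at x0 = (0, 0), the constraints -x1 <= 0 and -x2 <= 0 have
* error 0 and start out active, while x1 + 2*x2 <= 4 has error -4 and starts
* out inactive; any x0 with x1 < 0 would make the first error positive and
* trigger the InfeasibleInitialValues exception above. */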
//******************************************************************************
Template std::pair<VectorValues, VectorValues> This::optimize(
const VectorValues& initialValues, const VectorValues& duals,
bool useWarmStart) const {
// Initialize workingSet from the feasible initialValues
InequalityFactorGraph workingSet = identifyActiveConstraints(
problem_.inequalities, initialValues, duals, useWarmStart);
State state(initialValues, duals, workingSet, false, 0);
/// main loop of the solver
while (!state.converged) state = iterate(state);
return std::make_pair(state.values, state.duals);
}
//******************************************************************************
Template std::pair<VectorValues, VectorValues> This::optimize() const {
INITSOLVER initSolver(problem_);
VectorValues initValues = initSolver.solve();
return optimize(initValues);
}
}
#undef Template
#undef This


@ -1,177 +0,0 @@
/**
* @file ActiveSetSolver.cpp
* @brief Implementation of ActiveSetSolver.
* @author Ivan Dario Jimenez
* @author Duy Nguyen Ta
* @date 2/11/16
*/
#include <gtsam_unstable/linear/ActiveSetSolver.h>
#include "InfeasibleInitialValues.h"
namespace gtsam {
/*
* The goal of this function is to find currently active inequality constraints
* that violate the condition to be active. The one that violates the condition
* the most will be removed from the active set. See Nocedal06book, pg 469-471
*
* Find the BAD active inequality that pulls x strongest to the wrong direction
* of its constraint (i.e. it is pulling towards >0, while its feasible region is <=0)
*
* For active inequality constraints (those that are enforced as equality constraints
* in the current working set), we want lambda < 0.
* This is because:
* - From the Lagrangian L = f - lambda*c, we know that the constraint force
* is (lambda * \grad c) = \grad f. Intuitively, to keep the solution x stay
* on the constraint surface, the constraint force has to balance out with
* other unconstrained forces that are pulling x towards the unconstrained
* minimum point. The other unconstrained forces are pulling x toward (-\grad f),
* hence the constraint force has to be exactly \grad f, so that the total
* force is 0.
* - We also know that at the constraint surface c(x)=0, \grad c points towards + (>= 0),
* while we are solving for - (<=0) constraint.
* - We want the constraint force (lambda * \grad c) to pull x towards the - (<=0) direction
* i.e., the opposite direction of \grad c where the inequality constraint <=0 is satisfied.
* That means we want lambda < 0.
* - This is because when the constrained force pulls x towards the infeasible region (+),
* the unconstrained force is pulling x towards the opposite direction into
* the feasible region (again because the total force has to be 0 to make x stay still)
* So we can drop this constraint to have a lower error but feasible solution.
*
* In short, active inequality constraints with lambda > 0 are BAD, because they
* violate the condition to be active.
*
* And we want to remove the worst one with the largest lambda from the active set.
*
*/
int ActiveSetSolver::identifyLeavingConstraint(
const InequalityFactorGraph& workingSet,
const VectorValues& lambdas) const {
int worstFactorIx = -1;
// preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is
// either
// inactive or a good inequality constraint, so we don't care!
double maxLambda = 0.0;
for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) {
const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
if (factor->active()) {
double lambda = lambdas.at(factor->dualKey())[0];
if (lambda > maxLambda) {
worstFactorIx = factorIx;
maxLambda = lambda;
}
}
}
return worstFactorIx;
}
/* This function will create a dual graph that solves for the
* lagrange multipliers for the current working set.
* You can use lagrange multipliers as a necessary condition for optimality.
* The factor graph that is being solved is f' = -lambda * g'
* where f is the optimized function and g is the function resulting from
* aggregating the working set.
* The lambdas give you information about the feasibility of a constraint.
* if lambda < 0 the constraint is Ok
* if lambda = 0 you are on the constraint
* if lambda > 0 you are violating the constraint.
*/
GaussianFactorGraph::shared_ptr ActiveSetSolver::buildDualGraph(
const InequalityFactorGraph& workingSet, const VectorValues& delta) const {
GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph());
for (Key key : constrainedKeys_) {
// Each constrained key becomes a factor in the dual graph
JacobianFactor::shared_ptr dualFactor = createDualFactor(key, workingSet,
delta);
if (!dualFactor->empty())
dualGraph->push_back(dualFactor);
}
return dualGraph;
}
/*
* Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1]
*
* @return a tuple of (alpha, factorIndex, sigmaIndex) where (factorIndex, sigmaIndex)
* is the constraint that has minimum alpha, or (-1,-1) if alpha = 1.
* This constraint will be added to the working set and become active
* in the next iteration.
*/
/* We have to make sure the new solution with alpha satisfies all INACTIVE inequality constraints
* If some inactive inequality constraints complain about the full step (alpha = 1),
* we have to adjust alpha to stay within the inequality constraints' feasible regions.
*
* For each inactive inequality j:
* - We already have: aj'*xk - bj <= 0, since xk satisfies all inequality constraints
* - We want: aj'*(xk + alpha*p) - bj <= 0
* - If aj'*p <= 0, we have: aj'*(xk + alpha*p) <= aj'*xk <= bj, for all alpha>0
* it's good!
* - We only care when aj'*p > 0. In this case, we need to choose alpha so that
* aj'*xk + alpha*aj'*p - bj <= 0 --> alpha <= (bj - aj'*xk) / (aj'*p)
* We want to step as far as possible, so we should choose alpha = (bj - aj'*xk) / (aj'*p)
*
* We want the minimum of all those alphas among all inactive inequality.
*/
boost::tuple<double, int> ActiveSetSolver::computeStepSize(
const InequalityFactorGraph& workingSet, const VectorValues& xk,
const VectorValues& p) const {
double minAlpha = startAlpha_;
int closestFactorIx = -1;
for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) {
const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
double b = factor->getb()[0];
// only check inactive factors
if (!factor->active()) {
// Compute a'*p
double aTp = factor->dotProductRow(p);
// Check if a'*p >0. Don't care if it's not.
if (aTp <= 0)
continue;
// Compute a'*xk
double aTx = factor->dotProductRow(xk);
// alpha = (b - a'*xk) / (a'*p)
double alpha = (b - aTx) / aTp;
// We want the minimum of all those max alphas
if (alpha < minAlpha) {
closestFactorIx = factorIx;
minAlpha = alpha;
}
}
}
return boost::make_tuple(minAlpha, closestFactorIx);
}
//******************************************************************************
InequalityFactorGraph ActiveSetSolver::identifyActiveConstraints(
const InequalityFactorGraph& inequalities,
const VectorValues& initialValues, const VectorValues& duals,
bool useWarmStart) const {
InequalityFactorGraph workingSet;
for (const LinearInequality::shared_ptr &factor : inequalities) {
LinearInequality::shared_ptr workingFactor(new LinearInequality(*factor));
if (useWarmStart && duals.size() > 0) {
if (duals.exists(workingFactor->dualKey()))
workingFactor->activate();
else
workingFactor->inactivate();
} else {
double error = workingFactor->error(initialValues);
// Safety guard. This should not happen unless users provide a bad init
if (error > 0)
throw InfeasibleInitialValues();
if (fabs(error) < 1e-7)
workingFactor->activate();
else
workingFactor->inactivate();
}
workingSet.push_back(workingFactor);
}
return workingSet;
}
}


@ -1,6 +1,17 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file ActiveSetSolver.h
* @brief Abstract base class for solving problems with the active set method.
* @brief Active set method for solving LP, QP problems
* @author Ivan Dario Jimenez
* @author Duy Nguyen Ta
* @date 1/25/16
@ -14,26 +25,98 @@
namespace gtsam {
/**
* This is a base class for all implementations of the active set algorithm for solving
* Programming problems. It provides services and variables all active set implementations
* share.
* This class implements the active set algorithm for solving convex
* Programming problems.
*
* @tparam PROBLEM Type of the problem to solve, e.g. LP (linear program) or
* QP (quadratic program).
* @tparam POLICY specific detail policy tailored for the particular program
* @tparam INITSOLVER Solver for an initial feasible solution of this problem.
*/
template <class PROBLEM, class POLICY, class INITSOLVER>
class ActiveSetSolver {
protected:
KeySet constrainedKeys_; //!< all constrained keys, will become factors in dual graphs
VariableIndex equalityVariableIndex_,
inequalityVariableIndex_; //!< index to corresponding factors to build dual graphs
double startAlpha_;
public:
typedef std::vector<std::pair<Key, Matrix> > TermsContainer; //!< vector of key matrix pairs
//Matrices are usually the A term for a factor.
/// This struct contains the state information for a single iteration
struct State {
VectorValues values; //!< current best values at each step
VectorValues duals; //!< current values of dual variables at each step
InequalityFactorGraph workingSet; /*!< keep track of current active/inactive
inequality constraints */
bool converged; //!< True if the algorithm has converged to a solution
size_t iterations; /*!< Number of iterations. Incremented at the end of
each iteration. */
/// Default constructor
State()
: values(), duals(), workingSet(), converged(false), iterations(0) {}
/// Constructor with initial values
State(const VectorValues& initialValues, const VectorValues& initialDuals,
const InequalityFactorGraph& initialWorkingSet, bool _converged,
size_t _iterations)
: values(initialValues),
duals(initialDuals),
workingSet(initialWorkingSet),
converged(_converged),
iterations(_iterations) {}
};
protected:
const PROBLEM& problem_; //!< the particular [convex] problem to solve
VariableIndex equalityVariableIndex_,
inequalityVariableIndex_; /*!< index to corresponding factors to build
dual graphs */
KeySet constrainedKeys_; /*!< all constrained keys, will become factors in
dual graphs */
/// Vector of key matrix pairs. Matrices are usually the A term for a factor.
typedef std::vector<std::pair<Key, Matrix> > TermsContainer;
public:
/// Constructor
ActiveSetSolver(const PROBLEM& problem) : problem_(problem) {
equalityVariableIndex_ = VariableIndex(problem_.equalities);
inequalityVariableIndex_ = VariableIndex(problem_.inequalities);
constrainedKeys_ = problem_.equalities.keys();
constrainedKeys_.merge(problem_.inequalities.keys());
}
/**
* Creates a dual factor from the current workingSet and the key of
* the variable used to create the dual factor.
* Optimize with provided initial values
* For this version, it is the responsibility of the caller to provide
* a feasible initial value, otherwise, an exception will be thrown.
* @return a pair of <primal, dual> solutions
*/
virtual JacobianFactor::shared_ptr createDualFactor(Key key,
const InequalityFactorGraph& workingSet,
const VectorValues& delta) const = 0;
std::pair<VectorValues, VectorValues> optimize(
const VectorValues& initialValues,
const VectorValues& duals = VectorValues(),
bool useWarmStart = false) const;
/**
* For this version the caller will not have to provide an initial value
* @return a pair of <primal, dual> solutions
*/
std::pair<VectorValues, VectorValues> optimize() const;
protected:
/**
* Compute minimum step size alpha to move from the current point @p xk to the
* next feasible point along a direction @p p: x' = xk + alpha*p,
* where alpha \in [0,maxAlpha].
*
* For QP, maxAlpha = 1. For LP: maxAlpha = Inf.
*
* @return a tuple of (minAlpha, closestFactorIndex) where closestFactorIndex
* is the closest inactive inequality constraint that blocks xk from moving
* further, i.e. the one with the minimum alpha, or (maxAlpha, -1) if there is
* no such inactive blocking constraint.
*
* If there is a blocking constraint, the closest one will be added to the
* working set and become active in the next iteration.
*/
boost::tuple<double, int> computeStepSize(
const InequalityFactorGraph& workingSet, const VectorValues& xk,
const VectorValues& p, const double& maxAlpha) const;
/**
* Finds the active constraints in the given factor graph and returns the
@ -59,45 +142,46 @@ public:
}
return Aterms;
}
/**
* Identifies active constraints that shouldn't be active anymore.
*/
int identifyLeavingConstraint(const InequalityFactorGraph& workingSet,
const VectorValues& lambdas) const;
/**
* Builds a dual graph from the current working set.
* Creates a dual factor from the current workingSet and the key of
* the variable used to create the dual factor.
*/
JacobianFactor::shared_ptr createDualFactor(
Key key, const InequalityFactorGraph& workingSet,
const VectorValues& delta) const;
public: /// Just for testing...
/// Builds a dual graph from the current working set.
GaussianFactorGraph::shared_ptr buildDualGraph(
const InequalityFactorGraph& workingSet, const VectorValues& delta) const;
/*
* Given an initial value, this function determines which constraints are
* active, which can be used to initialize the working set.
* A constraint Ax <= b is active if we have an x' s.t. Ax' = b
/**
* Build a working graph of cost, equality and active inequality constraints
* to solve at each iteration.
* @param workingSet the collection of all cost and constrained factors
* @param xk current solution, used to build a special quadratic cost in LP
* @return the working graph to optimize for a new, better solution
*/
GaussianFactorGraph buildWorkingGraph(
const InequalityFactorGraph& workingSet,
const VectorValues& xk = VectorValues()) const;
/// Iterate 1 step, return a new state with a new workingSet and values
State iterate(const State& state) const;
/// Identify active constraints based on initial values.
InequalityFactorGraph identifyActiveConstraints(
const InequalityFactorGraph& inequalities,
const VectorValues& initialValues, const VectorValues& duals =
VectorValues(), bool useWarmStart = true) const;
const VectorValues& initialValues,
const VectorValues& duals = VectorValues(),
bool useWarmStart = false) const;
protected:
/**
* Protected constructor because this class doesn't have any meaning without
* a concrete Programming problem to solve.
*/
ActiveSetSolver(double startAlpha) :
constrainedKeys_(), startAlpha_(startAlpha) {
}
/// Identifies active constraints that shouldn't be active anymore.
int identifyLeavingConstraint(const InequalityFactorGraph& workingSet,
const VectorValues& lambdas) const;
/**
* Computes the distance to move from the current point being examined to the next
* location to be examined by the graph. This should only be used where there are less
* than two constraints active.
*/
boost::tuple<double, int> computeStepSize(
const InequalityFactorGraph& workingSet, const VectorValues& xk,
const VectorValues& p) const;
};
/**
@ -116,3 +200,5 @@ Key maxKey(const PROBLEM& problem) {
}
} // namespace gtsam
#include <gtsam_unstable/linear/ActiveSetSolver-inl.h>


@ -1,3 +1,14 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file InfeasibleInitialValues.h
* @brief Exception thrown when given Infeasible Initial Values.


@ -1,3 +1,14 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file InfeasibleOrUnboundedProblem.h
* @brief Thrown when the problem is either infeasible or unbounded
@ -5,6 +16,8 @@
* @date 1/24/16
*/
#pragma once
namespace gtsam {
class InfeasibleOrUnboundedProblem: public ThreadsafeException<


@ -1,3 +1,14 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file LP.h
* @brief Struct used to hold a Linear Programming Problem
@ -9,6 +20,7 @@
#include <gtsam_unstable/linear/LinearCost.h>
#include <gtsam_unstable/linear/EqualityFactorGraph.h>
#include <gtsam_unstable/linear/InequalityFactorGraph.h>
#include <string>


@ -0,0 +1,110 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file LPInitSolver.cpp
* @brief This finds a feasible solution for an LP problem
* @author Duy Nguyen Ta
* @author Ivan Dario Jimenez
* @date 6/16/16
*/
#include <gtsam_unstable/linear/LPInitSolver.h>
#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam_unstable/linear/InfeasibleOrUnboundedProblem.h>
namespace gtsam {
/******************************************************************************/
VectorValues LPInitSolver::solve() const {
// Build the graph to solve for the initial value of the initial problem
GaussianFactorGraph::shared_ptr initOfInitGraph = buildInitOfInitGraph();
VectorValues x0 = initOfInitGraph->optimize();
double y0 = compute_y0(x0);
Key yKey = maxKey(lp_) + 1; // the unique key for y0
VectorValues xy0(x0);
xy0.insert(yKey, Vector::Constant(1, y0));
// Formulate and solve the initial LP
LP::shared_ptr initLP = buildInitialLP(yKey);
// solve the initialLP
LPSolver lpSolveInit(*initLP);
VectorValues xyInit = lpSolveInit.optimize(xy0).first;
double yOpt = xyInit.at(yKey)[0];
xyInit.erase(yKey);
if (yOpt > 0)
throw InfeasibleOrUnboundedProblem();
else
return xyInit;
}
/******************************************************************************/
LP::shared_ptr LPInitSolver::buildInitialLP(Key yKey) const {
LP::shared_ptr initLP(new LP());
initLP->cost = LinearCost(yKey, I_1x1); // min y
initLP->equalities = lp_.equalities; // st. Ax = b
initLP->inequalities =
addSlackVariableToInequalities(yKey,
lp_.inequalities); // Cx-y <= d
return initLP;
}
/******************************************************************************/
GaussianFactorGraph::shared_ptr LPInitSolver::buildInitOfInitGraph() const {
// first add equality constraints Ax = b
GaussianFactorGraph::shared_ptr initGraph(
new GaussianFactorGraph(lp_.equalities));
// create factor ||x||^2 and add to the graph
const KeyDimMap& constrainedKeyDim = lp_.constrainedKeyDimMap();
for (Key key : constrainedKeyDim | boost::adaptors::map_keys) {
size_t dim = constrainedKeyDim.at(key);
initGraph->push_back(
JacobianFactor(key, Matrix::Identity(dim, dim), Vector::Zero(dim)));
}
return initGraph;
}
/******************************************************************************/
double LPInitSolver::compute_y0(const VectorValues& x0) const {
double y0 = -std::numeric_limits<double>::infinity();
for (const auto& factor : lp_.inequalities) {
double error = factor->error(x0);
if (error > y0) y0 = error;
}
return y0;
}
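/* Numeric illustration: if the inequality errors Cj*x0 - dj at x0 were, say,
* {-3.0, 0.5, -1.2}, then y0 = 0.5, and the pair (x0, y0) satisfies every
* relaxed constraint Cj*x - y <= dj with the smallest possible slack, which is
* exactly the starting point the initial LP built in buildInitialLP needs. */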
/******************************************************************************/
std::vector<std::pair<Key, Matrix> > LPInitSolver::collectTerms(
const LinearInequality& factor) const {
std::vector<std::pair<Key, Matrix> > terms;
for (Factor::const_iterator it = factor.begin(); it != factor.end(); it++) {
terms.push_back(make_pair(*it, factor.getA(it)));
}
return terms;
}
/******************************************************************************/
InequalityFactorGraph LPInitSolver::addSlackVariableToInequalities(
Key yKey, const InequalityFactorGraph& inequalities) const {
InequalityFactorGraph slackInequalities;
for (const auto& factor : lp_.inequalities) {
std::vector<std::pair<Key, Matrix> > terms = collectTerms(*factor); // Cx
terms.push_back(make_pair(yKey, Matrix::Constant(1, 1, -1.0))); // -y
double d = factor->getb()[0];
slackInequalities.push_back(LinearInequality(terms, d, factor->dualKey()));
}
return slackInequalities;
}
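/* Worked example: the inequality x1 + 2*x2 <= 4 from the simpleLP1 fixture in
* the unit tests at the end of this commit becomes x1 + 2*x2 - y <= 4 here; for
* any x, a large enough y satisfies it, which is what makes the phase-one
* problem "min y s.t. Ax = b, Cx - y <= d" trivially easy to initialize. */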
}


@ -1,3 +1,14 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file LPInitSolver.h
* @brief This LPInitSolver implements the initialization strategy used in Matlab.
@ -8,7 +19,8 @@
#pragma once
#include <gtsam_unstable/linear/InfeasibleOrUnboundedProblem.h>
#include <gtsam_unstable/linear/LP.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <CppUnitLite/Test.h>
namespace gtsam {
@ -43,101 +55,35 @@ private:
const LP& lp_;
public:
LPInitSolver(const LP& lp) : lp_(lp) {
}
/// Construct with an LP problem
LPInitSolver(const LP& lp) : lp_(lp) {}
virtual ~LPInitSolver() {
}
virtual VectorValues solve() const {
// Build the graph to solve for the initial value of the initial problem
GaussianFactorGraph::shared_ptr initOfInitGraph = buildInitOfInitGraph();
VectorValues x0 = initOfInitGraph->optimize();
double y0 = compute_y0(x0);
Key yKey = maxKey(lp_) + 1; // the unique key for y0
VectorValues xy0(x0);
xy0.insert(yKey, Vector::Constant(1, y0));
// Formulate and solve the initial LP
LP::shared_ptr initLP = buildInitialLP(yKey);
// solve the initialLP
LPSolver lpSolveInit(*initLP);
VectorValues xyInit = lpSolveInit.optimize(xy0).first;
double yOpt = xyInit.at(yKey)[0];
xyInit.erase(yKey);
if (yOpt > 0)
throw InfeasibleOrUnboundedProblem();
else
return xyInit;
}
///@return a feasible initialization point
VectorValues solve() const;
private:
/// build initial LP
LP::shared_ptr buildInitialLP(Key yKey) const {
LP::shared_ptr initLP(new LP());
initLP->cost = LinearCost(yKey, I_1x1); // min y
initLP->equalities = lp_.equalities; // st. Ax = b
initLP->inequalities = addSlackVariableToInequalities(yKey,
lp_.inequalities); // Cx-y <= d
return initLP;
}
LP::shared_ptr buildInitialLP(Key yKey) const;
/**
* Build the following graph to solve for an initial value of the initial problem
* min ||x||^2 s.t. Ax = b
*/
GaussianFactorGraph::shared_ptr buildInitOfInitGraph() const {
// first add equality constraints Ax = b
GaussianFactorGraph::shared_ptr initGraph(
new GaussianFactorGraph(lp_.equalities));
// create factor ||x||^2 and add to the graph
const KeyDimMap& constrainedKeyDim = lp_.constrainedKeyDimMap();
for (Key key : constrainedKeyDim | boost::adaptors::map_keys) {
size_t dim = constrainedKeyDim.at(key);
initGraph->push_back(
JacobianFactor(key, Matrix::Identity(dim, dim), Vector::Zero(dim)));
}
return initGraph;
}
GaussianFactorGraph::shared_ptr buildInitOfInitGraph() const;
/// y = max_j ( Cj*x0 - dj ) -- due to the inequality constraints y >= Cx - d
double compute_y0(const VectorValues& x0) const {
double y0 = -std::numeric_limits<double>::infinity();
for (const auto& factor : lp_.inequalities) {
double error = factor->error(x0);
if (error > y0)
y0 = error;
}
return y0;
}
double compute_y0(const VectorValues& x0) const;
/// Collect all terms of a factor into a container.
std::vector<std::pair<Key, Matrix> > collectTerms(
const LinearInequality& factor) const {
std::vector < std::pair<Key, Matrix> > terms;
for (Factor::const_iterator it = factor.begin(); it != factor.end(); it++) {
terms.push_back(make_pair(*it, factor.getA(it)));
}
return terms;
}
std::vector<std::pair<Key, Matrix>> collectTerms(
const LinearInequality& factor) const;
/// Turn Cx <= d into Cx - y <= d factors
InequalityFactorGraph addSlackVariableToInequalities(Key yKey,
const InequalityFactorGraph& inequalities) const {
InequalityFactorGraph slackInequalities;
for (const auto& factor : lp_.inequalities) {
std::vector < std::pair<Key, Matrix> > terms = collectTerms(*factor); // Cx
terms.push_back(make_pair(yKey, Matrix::Constant(1, 1, -1.0))); // -y
double d = factor->getb()[0];
slackInequalities.push_back(
LinearInequality(terms, d, factor->dualKey()));
}
return slackInequalities;
}
const InequalityFactorGraph& inequalities) const;
// friend class for unit-testing private methods
FRIEND_TEST(LPInitSolver, initialization);
};
}


@ -1,3 +1,14 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file LPSolver.cpp
* @brief
@ -7,166 +18,8 @@
*/
#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam_unstable/linear/InfeasibleInitialValues.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam_unstable/linear/LPInitSolver.h>
namespace gtsam {
//******************************************************************************
LPSolver::LPSolver(const LP &lp) :
ActiveSetSolver(std::numeric_limits<double>::infinity()), lp_(lp) {
// Variable index
equalityVariableIndex_ = VariableIndex(lp_.equalities);
inequalityVariableIndex_ = VariableIndex(lp_.inequalities);
constrainedKeys_ = lp_.equalities.keys();
constrainedKeys_.merge(lp_.inequalities.keys());
}
//******************************************************************************
LPState LPSolver::iterate(const LPState &state) const {
// Solve with the current working set
// LP: project the objective neg. gradient to the constraint's null space
// to find the direction to move
VectorValues newValues = solveWithCurrentWorkingSet(state.values,
state.workingSet);
// If we CAN'T move further
// LP: projection on the constraints' nullspace is zero: we are at a vertex
if (newValues.equals(state.values, 1e-7)) {
// Find and remove the bad inequality constraint by computing its lambda
// Compute lambda from the dual graph
// LP: project the objective's gradient onto each constraint gradient to
// obtain the dual scaling factors
// is it true??
GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet,
newValues);
VectorValues duals = dualGraph->optimize();
// LP: see which inequality constraint has wrong pulling direction, i.e., dual < 0
int leavingFactor = identifyLeavingConstraint(state.workingSet, duals);
// If all inequality constraints are satisfied: We have the solution!!
if (leavingFactor < 0) {
// TODO If we still have infeasible equality constraints: the problem is
// over-constrained. No solution!
// ...
return LPState(newValues, duals, state.workingSet, true,
state.iterations + 1);
} else {
// Inactivate the leaving constraint
// LP: remove the bad ineq constraint out of the working set
InequalityFactorGraph newWorkingSet = state.workingSet;
newWorkingSet.at(leavingFactor)->inactivate();
return LPState(newValues, duals, newWorkingSet, false,
state.iterations + 1);
}
} else {
// If we CAN make some progress, i.e. p_k != 0
// Adapt stepsize if some inactive constraints complain about this move
// LP: projection on nullspace is NOT zero:
// find and put a blocking inactive constraint to the working set,
// otherwise the problem is unbounded!!!
double alpha;
int factorIx;
VectorValues p = newValues - state.values;
// GTSAM_PRINT(p);
boost::tie(alpha, factorIx) = // using 16.41
computeStepSize(state.workingSet, state.values, p);
// also add to the working set the one that complains the most
InequalityFactorGraph newWorkingSet = state.workingSet;
if (factorIx >= 0)
newWorkingSet.at(factorIx)->activate();
// step!
newValues = state.values + alpha * p;
return LPState(newValues, state.duals, newWorkingSet, false,
state.iterations + 1);
}
}
//******************************************************************************
GaussianFactorGraph::shared_ptr LPSolver::createLeastSquareFactors(
const LinearCost &cost, const VectorValues &xk) const {
GaussianFactorGraph::shared_ptr graph(new GaussianFactorGraph());
for (LinearCost::const_iterator it = cost.begin(); it != cost.end(); ++it) {
size_t dim = cost.getDim(it);
Vector b = xk.at(*it) - cost.getA(it).transpose(); // b = xk-g
graph->push_back(JacobianFactor(*it, Matrix::Identity(dim, dim), b));
}
KeySet allKeys = lp_.inequalities.keys();
allKeys.merge(lp_.equalities.keys());
allKeys.merge(KeySet(lp_.cost.keys()));
// Add corresponding factors for all variables that are not explicitly in the
// cost function. Gradients of the cost function wrt to these variables are
// zero (g=0), so b=xk
if (cost.keys().size() != allKeys.size()) {
KeySet difference;
std::set_difference(allKeys.begin(), allKeys.end(), lp_.cost.begin(),
lp_.cost.end(), std::inserter(difference, difference.end()));
for (Key k : difference) {
size_t dim = lp_.constrainedKeyDimMap().at(k);
graph->push_back(JacobianFactor(k, Matrix::Identity(dim, dim), xk.at(k)));
}
}
return graph;
}
//******************************************************************************
VectorValues LPSolver::solveWithCurrentWorkingSet(const VectorValues &xk,
const InequalityFactorGraph &workingSet) const {
GaussianFactorGraph workingGraph;
// || X - Xk + g ||^2
workingGraph.push_back(*createLeastSquareFactors(lp_.cost, xk));
workingGraph.push_back(lp_.equalities);
for (const LinearInequality::shared_ptr &factor : workingSet) {
if (factor->active())
workingGraph.push_back(factor);
}
return workingGraph.optimize();
}
//******************************************************************************
boost::shared_ptr<JacobianFactor> LPSolver::createDualFactor(
Key key, const InequalityFactorGraph &workingSet,
const VectorValues &delta) const {
// Transpose the A matrix of constrained factors to have the jacobian of the
// dual key
TermsContainer Aterms = collectDualJacobians<LinearEquality>(
key, lp_.equalities, equalityVariableIndex_);
TermsContainer AtermsInequalities = collectDualJacobians<LinearInequality>(
key, workingSet, inequalityVariableIndex_);
Aterms.insert(Aterms.end(), AtermsInequalities.begin(),
AtermsInequalities.end());
// Collect the gradients of unconstrained cost factors to the b vector
if (Aterms.size() > 0) {
Vector b = lp_.costGradient(key, delta);
// to compute the least-square approximation of dual variables
return boost::make_shared<JacobianFactor>(Aterms, b);
} else {
return boost::make_shared<JacobianFactor>();
}
}
//******************************************************************************
std::pair<VectorValues, VectorValues> LPSolver::optimize(
const VectorValues &initialValues, const VectorValues &duals) const {
{
// Initialize workingSet from the feasible initialValues
InequalityFactorGraph workingSet = identifyActiveConstraints(
lp_.inequalities, initialValues, duals);
LPState state(initialValues, duals, workingSet, false, 0);
/// main loop of the solver
while (!state.converged) {
state = iterate(state);
}
return make_pair(state.values, state.duals);
}
}
//******************************************************************************
pair<VectorValues, VectorValues> LPSolver::optimize() const {
LPInitSolver initSolver(lp_);
VectorValues initValues = initSolver.solve();
return optimize(initValues);
}
constexpr double LPPolicy::maxAlpha;
}


@ -1,41 +1,36 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file LPSolver.h
* @brief Class used to solve Linear Programming Problems as defined in LP.h
* @brief Policy of ActiveSetSolver to solve Linear Programming Problems
* @author Duy Nguyen Ta
* @author Ivan Dario Jimenez
* @date 1/24/16
* @date 6/16/16
*/
#pragma once
#include <gtsam_unstable/linear/LPState.h>
#include <gtsam_unstable/linear/LP.h>
#include <gtsam_unstable/linear/ActiveSetSolver.h>
#include <gtsam_unstable/linear/LinearCost.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam_unstable/linear/LPInitSolver.h>
#include <boost/range/adaptor/map.hpp>
#include <limits>
#include <algorithm>
namespace gtsam {
class LPSolver: public ActiveSetSolver {
const LP &lp_; //!< the linear programming problem
public:
/// Constructor
LPSolver(const LP &lp);
const LP &lp() const {
return lp_;
}
/*
* This function performs an iteration of the Active Set Method for solving
* LP problems. At the end of this iteration the problem should either be found
* to be infeasible, solved, or the current state changed to reflect a new
* working set.
*/ //SAME
LPState iterate(const LPState &state) const;
/// Policy for ActiveSetSolver to solve Linear Programming \sa LP problems
struct LPPolicy {
/// Maximum alpha for the line search x' = xk + alpha*p, where p is the step direction
/// For LP, maxAlpha = Infinity
static constexpr double maxAlpha = std::numeric_limits<double>::infinity();
/**
* Create the factor ||x-xk - (-g)||^2 where xk is the current feasible solution
@ -49,34 +44,37 @@ public:
* The least-square solution of this quadratic subject to a set of linear constraints
* is the projection of the gradient onto the constraints' subspace
*/
GaussianFactorGraph::shared_ptr createLeastSquareFactors(
const LinearCost &cost, const VectorValues &xk) const;
static GaussianFactorGraph buildCostFunction(const LP& lp,
const VectorValues& xk) {
GaussianFactorGraph graph;
for (LinearCost::const_iterator it = lp.cost.begin(); it != lp.cost.end();
++it) {
size_t dim = lp.cost.getDim(it);
Vector b = xk.at(*it) - lp.cost.getA(it).transpose(); // b = xk-g
graph.push_back(JacobianFactor(*it, Matrix::Identity(dim, dim), b));
}
/// Find solution with the current working set
//SAME
VectorValues solveWithCurrentWorkingSet(const VectorValues &xk,
const InequalityFactorGraph &workingSet) const;
/*
* A dual factor takes the objective function and a set of constraints.
* It then creates a least-squares approximation of the Lagrange multipliers
* for the following problem: f' = -lambda * g', where f is the objective
* function, g are the dual factors, and lambda is the Lagrange multiplier.
*/
//SAME
JacobianFactor::shared_ptr createDualFactor(Key key,
const InequalityFactorGraph &workingSet, const VectorValues &delta) const;
/** Optimize with the provided feasible initial values
* TODO: throw exception if the initial values is not feasible wrt inequality constraints
* TODO: comment duals
*/
pair<VectorValues, VectorValues> optimize(const VectorValues &initialValues,
const VectorValues &duals = VectorValues()) const;
/**
* Optimize without initial values.
*/
pair<VectorValues, VectorValues> optimize() const;
KeySet allKeys = lp.inequalities.keys();
allKeys.merge(lp.equalities.keys());
allKeys.merge(KeySet(lp.cost.keys()));
// Add corresponding factors for all variables that are not explicitly in
// the cost function. Gradients of the cost function wrt to these variables
// are zero (g=0), so b=xk
if (lp.cost.keys().size() != allKeys.size()) {
KeySet difference;
std::set_difference(allKeys.begin(), allKeys.end(), lp.cost.begin(),
lp.cost.end(),
std::inserter(difference, difference.end()));
for (Key k : difference) {
size_t dim = lp.constrainedKeyDimMap().at(k);
graph.push_back(
JacobianFactor(k, Matrix::Identity(dim, dim), xk.at(k)));
}
}
return graph;
}
};
} // namespace gtsam
using LPSolver = ActiveSetSolver<LP, LPPolicy, LPInitSolver>;
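/* Hedged usage sketch: solving a small LP with the alias above. This is a
* hypothetical helper, not part of GTSAM; the LP, LinearCost and
* LinearInequality constructors are used exactly as in the simpleLP1 fixture of
* testLPSolver at the end of this commit, and anything beyond that is an
* assumption. */
inline std::pair<VectorValues, VectorValues> exampleSolveSmallLP() {
  LP lp;
  lp.cost = LinearCost(1, Vector2(-1., -1.));                            // min -x1 - x2
  lp.inequalities.push_back(LinearInequality(1, Vector2(-1, 0), 0, 1));  // x1 >= 0
  lp.inequalities.push_back(LinearInequality(1, Vector2(0, -1), 0, 2));  // x2 >= 0
  lp.inequalities.push_back(LinearInequality(1, Vector2(1, 2), 4, 3));   // x1 + 2*x2 <= 4
  LPSolver solver(lp);        // ActiveSetSolver<LP, LPPolicy, LPInitSolver>
  return solver.optimize();   // finds its own feasible start via LPInitSolver
}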
}


@ -1,44 +0,0 @@
/**
* @file LPState.h
* @brief This struct holds the state of LPSolver at each iteration
* @author Ivan Dario Jimenez
* @date 1/24/16
*/
#include <gtsam/linear/VectorValues.h>
#include "InequalityFactorGraph.h"
namespace gtsam {
/*
* This struct contains the state information for a single iteration of an
* active set method iteration.
*/
struct LPState {
// An intermediate value for the final solution.
VectorValues values;
// Contains the set of duals computed during the iteration that returned this
// state.
VectorValues duals;
// An inequality factor graph that contains only the active constraints.
InequalityFactorGraph workingSet;
// True if the algorithm has converged to a solution
bool converged;
// Counter for the number of iterations. Incremented at the end of each iter.
size_t iterations;
/// default constructor
LPState() :
values(), duals(), workingSet(), converged(false), iterations(0) {
}
/// constructor with initial values
LPState(const VectorValues& initialValues, const VectorValues& initialDuals,
const InequalityFactorGraph& initialWorkingSet, bool _converged,
size_t _iterations) :
values(initialValues), duals(initialDuals), workingSet(initialWorkingSet), converged(
_converged), iterations(_iterations) {
}
};
}


@ -0,0 +1,54 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file QPInitSolver.h
* @brief This finds a feasible solution for a QP problem
* @author Duy Nguyen Ta
* @author Ivan Dario Jimenez
* @date 6/16/16
*/
#pragma once
#include <gtsam_unstable/linear/LPInitSolver.h>
namespace gtsam {
/**
* This class finds a feasible solution for a QP problem.
* This uses the Matlab strategy for initialization
* For details, see
* http://www.mathworks.com/help/optim/ug/quadratic-programming-algorithms.html#brrzwpf-22
*/
class QPInitSolver {
const QP& qp_;
public:
/// Constructor with a QP problem
QPInitSolver(const QP& qp) : qp_(qp) {}
///@return a feasible initialization point
VectorValues solve() const {
// Make an LP with any linear cost function. It doesn't matter for
// initialization.
LP initProblem;
// make an unrelated key for a random variable cost
Key newKey = maxKey(qp_) + 1;
initProblem.cost = LinearCost(newKey, Vector::Ones(1));
initProblem.equalities = qp_.equalities;
initProblem.inequalities = qp_.inequalities;
LPInitSolver initSolver(initProblem);
return initSolver.solve();
}
};
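/* Hedged usage sketch (how the QP members are filled in is omitted and should
* be treated as an assumption, not the definitive API):
*
*   QP qp;                           // qp.cost, qp.equalities, qp.inequalities
*   QPInitSolver init(qp);
*   VectorValues x0 = init.solve();  // any feasible point, via the phase-1 LP
*   QPSolver solver(qp);             // alias defined in QPSolver.h
*   std::pair<VectorValues, VectorValues> result = solver.optimize(x0);
*/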
}


@ -16,136 +16,8 @@
* @author Duy-Nguyen Ta
*/
#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/FactorGraph-inst.h>
#include <gtsam_unstable/linear/QPSolver.h>
#include <gtsam_unstable/linear/LPSolver.h>
#include <gtsam_unstable/linear/InfeasibleInitialValues.h>
#include <boost/range/adaptor/map.hpp>
#include <gtsam_unstable/linear/LPInitSolver.h>
using namespace std;
namespace gtsam {
//******************************************************************************
QPSolver::QPSolver(const QP& qp) :
ActiveSetSolver(1.0), qp_(qp) {
equalityVariableIndex_ = VariableIndex(qp_.equalities);
inequalityVariableIndex_ = VariableIndex(qp_.inequalities);
constrainedKeys_ = qp_.equalities.keys();
constrainedKeys_.merge(qp_.inequalities.keys());
}
//******************************************************************************
VectorValues QPSolver::solveWithCurrentWorkingSet(
const InequalityFactorGraph& workingSet) const {
GaussianFactorGraph workingGraph = qp_.cost;
workingGraph.push_back(qp_.equalities);
for (const LinearInequality::shared_ptr& factor : workingSet) {
if (factor->active())
workingGraph.push_back(factor);
}
return workingGraph.optimize();
}
//******************************************************************************
JacobianFactor::shared_ptr QPSolver::createDualFactor(
Key key, const InequalityFactorGraph& workingSet,
const VectorValues& delta) const {
// Transpose the A matrix of constrained factors to have the jacobian of the
// dual key
TermsContainer Aterms = collectDualJacobians<LinearEquality>(
key, qp_.equalities, equalityVariableIndex_);
TermsContainer AtermsInequalities = collectDualJacobians<LinearInequality>(
key, workingSet, inequalityVariableIndex_);
Aterms.insert(Aterms.end(), AtermsInequalities.begin(),
AtermsInequalities.end());
// Collect the gradients of unconstrained cost factors to the b vector
if (Aterms.size() > 0) {
Vector b = qp_.costGradient(key, delta);
// to compute the least-square approximation of dual variables
return boost::make_shared<JacobianFactor>(Aterms, b);
} else {
return boost::make_shared<JacobianFactor>();
}
}
//******************************************************************************
QPState QPSolver::iterate(const QPState& state) const {
// Algorithm 16.3 from Nocedal06book.
// Solve with the current working set eqn 16.39, but instead of solving for p
// solve for x
VectorValues newValues = solveWithCurrentWorkingSet(state.workingSet);
// If we CAN'T move further
// if p_k = 0 is the original condition, modified by Duy to say that the state
// update is zero.
if (newValues.equals(state.values, 1e-7)) {
// Compute lambda from the dual graph
GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet,
newValues);
VectorValues duals = dualGraph->optimize();
int leavingFactor = identifyLeavingConstraint(state.workingSet, duals);
// If all inequality constraints are satisfied: We have the solution!!
if (leavingFactor < 0) {
return QPState(newValues, duals, state.workingSet, true,
state.iterations + 1);
} else {
// Inactivate the leaving constraint
InequalityFactorGraph newWorkingSet = state.workingSet;
newWorkingSet.at(leavingFactor)->inactivate();
return QPState(newValues, duals, newWorkingSet, false,
state.iterations + 1);
}
} else {
// If we CAN make some progress, i.e. p_k != 0
// Adapt stepsize if some inactive constraints complain about this move
double alpha;
int factorIx;
VectorValues p = newValues - state.values;
boost::tie(alpha, factorIx) = // using 16.41
computeStepSize(state.workingSet, state.values, p);
// also add to the working set the one that complains the most
InequalityFactorGraph newWorkingSet = state.workingSet;
if (factorIx >= 0)
newWorkingSet.at(factorIx)->activate();
// step!
newValues = state.values + alpha * p;
return QPState(newValues, state.duals, newWorkingSet, false,
state.iterations + 1);
}
}
//******************************************************************************
pair<VectorValues, VectorValues> QPSolver::optimize(
const VectorValues& initialValues, const VectorValues& duals,
bool useWarmStart) const {
// Initialize workingSet from the feasible initialValues
InequalityFactorGraph workingSet = identifyActiveConstraints(qp_.inequalities,
initialValues, duals, useWarmStart);
QPState state(initialValues, duals, workingSet, false, 0);
/// main loop of the solver
while (!state.converged)
state = iterate(state);
return make_pair(state.values, state.duals);
}
//******************************************************************************
pair<VectorValues, VectorValues> QPSolver::optimize() const {
//Make an LP with any linear cost function. It doesn't matter for initialization.
LP initProblem;
// make an unrelated key for a random variable cost
Key newKey = maxKey(qp_) + 1;
initProblem.cost = LinearCost(newKey, Vector::Ones(1));
initProblem.equalities = qp_.equalities;
initProblem.inequalities = qp_.inequalities;
LPInitSolver initSolver(initProblem);
VectorValues initValues = initSolver.solve();
return optimize(initValues);
}
} /* namespace gtsam */
constexpr double QPPolicy::maxAlpha;
}


@ -10,71 +10,34 @@
* -------------------------------------------------------------------------- */
/**
* @file QPSolver.h
* @brief A quadratic programming solver implements the active set method
* @date Apr 15, 2014
* @author Ivan Dario Jimenez
* @author Duy-Nguyen Ta
* @file QPSolver.h
* @brief Policy of ActiveSetSolver to solve Quadratic Programming Problems
* @author Duy Nguyen Ta
* @author Ivan Dario Jimenez
* @date 6/16/16
*/
#pragma once
#include <gtsam_unstable/linear/QP.h>
#include <gtsam_unstable/linear/ActiveSetSolver.h>
#include <gtsam_unstable/linear/QPState.h>
#include <gtsam/linear/VectorValues.h>
#include <vector>
#include <set>
#include <gtsam_unstable/linear/QPInitSolver.h>
#include <limits>
#include <algorithm>
namespace gtsam {
/**
* This QPSolver uses the active set method to solve a quadratic programming problem
* defined in the QP struct.
* Note: This version of QPSolver only works with a feasible initial value.
*/
//TODO: Remove Vector Values
class QPSolver: public ActiveSetSolver {
const QP& qp_; //!< factor graphs of the QP problem, can't be modified!
public:
/// Constructor
QPSolver(const QP& qp);
/// Find solution with the current working set
//SAME
VectorValues solveWithCurrentWorkingSet(
const InequalityFactorGraph& workingSet) const;
/// Create a dual factor
//SAME
JacobianFactor::shared_ptr createDualFactor(Key key,
const InequalityFactorGraph& workingSet, const VectorValues& delta) const;
/// Iterate 1 step, return a new state with a new workingSet and values
QPState iterate(const QPState& state) const;
/**
* Optimize with provided initial values
* For this version, it is the responsibility of the caller to provide
* a feasible initial value, otherwise, an exception will be thrown.
* @return a pair of <primal, dual> solutions
*/
std::pair<VectorValues, VectorValues> optimize(
const VectorValues& initialValues, const VectorValues& duals =
VectorValues(), bool useWarmStart = true) const;
/**
* For this version the caller will not have to provide an initial value
* Uses the matlab strategy for initialization
* See http://www.mathworks.com/help/optim/ug/quadratic-programming-algorithms.html#brrzwpf-22
* For details
* @return a pair of <primal, dual> solutions
*/
std::pair<VectorValues, VectorValues> optimize() const;
/// Policy for ActiveSetSolver to solve Quadratic Programming (QP) problems
struct QPPolicy {
/// Maximum alpha for the line search x' = xk + alpha*p, where p is the step direction
/// For QP, maxAlpha = 1 corresponds to the minimum point of the quadratic cost
static constexpr double maxAlpha = 1.0;
/// Simply the cost of the QP problem
static const GaussianFactorGraph& buildCostFunction(
const QP& qp, const VectorValues& xk = VectorValues()) {
return qp.cost;
}
};
} // namespace gtsam
using QPSolver = ActiveSetSolver<QP, QPPolicy, QPInitSolver>;
}
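To make the policy mechanism concrete, here is a sketch of how a variant solver could be assembled from the same template. DampedQPPolicy and DampedQPSolver are invented names used only for illustration; the two members shown are simply those that QPPolicy above provides.

// Hypothetical policy: identical to QPPolicy except for a more conservative
// cap on the line-search step length.
struct DampedQPPolicy {
  static constexpr double maxAlpha = 0.5;  // never take more than half a step
  static const GaussianFactorGraph& buildCostFunction(
      const QP& qp, const VectorValues& xk = VectorValues()) {
    return qp.cost;  // same quadratic cost as QPPolicy
  }
};
// As with QPPolicy, a translation unit would still need the out-of-line
// definition: constexpr double DampedQPPolicy::maxAlpha;
using DampedQPSolver = ActiveSetSolver<QP, DampedQPPolicy, QPInitSolver>;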


@ -1,29 +0,0 @@
//
// Created by ivan on 1/25/16.
//
#pragma once
namespace gtsam {
/// This struct holds the state of QPSolver at each iteration
struct QPState {
VectorValues values;
VectorValues duals;
InequalityFactorGraph workingSet;
bool converged;
size_t iterations;
/// default constructor
QPState() :
values(), duals(), workingSet(), converged(false), iterations(0) {
}
/// constructor with initial values
QPState(const VectorValues& initialValues, const VectorValues& initialDuals,
const InequalityFactorGraph& initialWorkingSet, bool _converged,
size_t _iterations) :
values(initialValues), duals(initialDuals), workingSet(initialWorkingSet), converged(
_converged), iterations(_iterations) {
}
};
}


@ -47,12 +47,17 @@ static const Vector kOne = Vector::Ones(1), kZero = Vector::Zero(1);
*/
LP simpleLP1() {
LP lp;
lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2)
lp.inequalities.push_back(LinearInequality(1, Vector2(-1, 0), 0, 1)); // x1 >= 0
lp.inequalities.push_back(LinearInequality(1, Vector2(0, -1), 0, 2)); // x2 >= 0
lp.inequalities.push_back(LinearInequality(1, Vector2(1, 2), 4, 3)); // x1 + 2*x2 <= 4
lp.inequalities.push_back(LinearInequality(1, Vector2(4, 2), 12, 4)); // 4x1 + 2x2 <= 12
lp.inequalities.push_back(LinearInequality(1, Vector2(-1, 1), 1, 5)); // -x1 + x2 <= 1
lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2)
lp.inequalities.push_back(
LinearInequality(1, Vector2(-1, 0), 0, 1)); // x1 >= 0
lp.inequalities.push_back(
LinearInequality(1, Vector2(0, -1), 0, 2)); // x2 >= 0
lp.inequalities.push_back(
LinearInequality(1, Vector2(1, 2), 4, 3)); // x1 + 2*x2 <= 4
lp.inequalities.push_back(
LinearInequality(1, Vector2(4, 2), 12, 4)); // 4x1 + 2x2 <= 12
lp.inequalities.push_back(
LinearInequality(1, Vector2(-1, 1), 1, 5)); // -x1 + x2 <= 1
return lp;
}
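Restated as a mathematical program (a transcription of the factors above, added here for readability), simpleLP1 is

\[
\begin{aligned}
\min_{x_1, x_2}\quad & -x_1 - x_2 \\
\text{s.t.}\quad & x_1 \ge 0,\qquad x_2 \ge 0,\\
& x_1 + 2x_2 \le 4,\\
& 4x_1 + 2x_2 \le 12,\\
& -x_1 + x_2 \le 1.
\end{aligned}
\]

At the optimum (8/3, 2/3) used by the tests below, x1 + 2x2 <= 4 and 4x1 + 2x2 <= 12 hold with equality, while -x1 + x2 <= 1 remains slack.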
@ -61,15 +66,20 @@ namespace gtsam {
TEST(LPInitSolver, infinite_loop_single_var) {
LP initchecker;
initchecker.cost = LinearCost(1,Vector3(0,0,1)); //min alpha
initchecker.inequalities.push_back(LinearInequality(1, Vector3(-2,-1,-1),-2,1));//-2x-y-alpha <= -2
initchecker.inequalities.push_back(LinearInequality(1, Vector3(-1,2,-1), 6, 2));// -x+2y-alpha <= 6
initchecker.inequalities.push_back(LinearInequality(1, Vector3(-1,0,-1), 0,3));// -x - alpha <= 0
initchecker.inequalities.push_back(LinearInequality(1, Vector3(1,0,-1), 20, 4));//x - alpha <= 20
initchecker.inequalities.push_back(LinearInequality(1, Vector3(0,-1,-1),0, 5));// -y - alpha <= 0
initchecker.cost = LinearCost(1, Vector3(0, 0, 1)); // min alpha
initchecker.inequalities.push_back(
LinearInequality(1, Vector3(-2, -1, -1), -2, 1)); //-2x-y-alpha <= -2
initchecker.inequalities.push_back(
LinearInequality(1, Vector3(-1, 2, -1), 6, 2)); // -x+2y-alpha <= 6
initchecker.inequalities.push_back(
LinearInequality(1, Vector3(-1, 0, -1), 0, 3)); // -x - alpha <= 0
initchecker.inequalities.push_back(
LinearInequality(1, Vector3(1, 0, -1), 20, 4)); // x - alpha <= 20
initchecker.inequalities.push_back(
LinearInequality(1, Vector3(0, -1, -1), 0, 5)); // -y - alpha <= 0
LPSolver solver(initchecker);
VectorValues starter;
starter.insert(1,Vector3(0,0,2));
starter.insert(1, Vector3(0, 0, 2));
VectorValues results, duals;
boost::tie(results, duals) = solver.optimize(starter);
VectorValues expected;
@ -82,14 +92,19 @@ TEST(LPInitSolver, infinite_loop_multi_var) {
Key X = symbol('X', 1);
Key Y = symbol('Y', 1);
Key Z = symbol('Z', 1);
initchecker.cost = LinearCost(Z, kOne); //min alpha
initchecker.inequalities.push_back(
    LinearInequality(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2, 1));//-2x-y-alpha <= -2
initchecker.inequalities.push_back(
    LinearInequality(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6, 2));// -x+2y-alpha <= 6
initchecker.inequalities.push_back(LinearInequality(X, -1.0 * kOne, Z, -1.0 * kOne, 0, 3));// -x - alpha <= 0
initchecker.inequalities.push_back(LinearInequality(X, 1.0 * kOne, Z, -1.0 * kOne, 20, 4));//x - alpha <= 20
initchecker.inequalities.push_back(LinearInequality(Y, -1.0 * kOne, Z, -1.0 * kOne, 0, 5));// -y - alpha <= 0
initchecker.cost = LinearCost(Z, kOne); // min alpha
initchecker.inequalities.push_back(
    LinearInequality(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2,
                     1)); //-2x-y-alpha <= -2
initchecker.inequalities.push_back(
    LinearInequality(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6,
                     2)); // -x+2y-alpha <= 6
initchecker.inequalities.push_back(LinearInequality(
    X, -1.0 * kOne, Z, -1.0 * kOne, 0, 3)); // -x - alpha <= 0
initchecker.inequalities.push_back(LinearInequality(
    X, 1.0 * kOne, Z, -1.0 * kOne, 20, 4)); // x - alpha <= 20
initchecker.inequalities.push_back(LinearInequality(
    Y, -1.0 * kOne, Z, -1.0 * kOne, 0, 5)); // -y - alpha <= 0
LPSolver solver(initchecker);
VectorValues starter;
starter.insert(X, kZero);
@ -108,7 +123,8 @@ TEST(LPInitSolver, initialization) {
LP lp = simpleLP1();
LPInitSolver initSolver(lp);
GaussianFactorGraph::shared_ptr initOfInitGraph = initSolver.buildInitOfInitGraph();
GaussianFactorGraph::shared_ptr initOfInitGraph =
initSolver.buildInitOfInitGraph();
VectorValues x0 = initOfInitGraph->optimize();
VectorValues expected_x0;
expected_x0.insert(1, Vector::Zero(2));
@ -122,16 +138,19 @@ TEST(LPInitSolver, initialization) {
LP::shared_ptr initLP = initSolver.buildInitialLP(yKey);
LP expectedInitLP;
expectedInitLP.cost = LinearCost(yKey, kOne);
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2( -1, 0), 2, Vector::Constant(1, -1), 0, 1)); // -x1 - y <= 0
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2( 0, -1), 2, Vector::Constant(1, -1), 0, 2));// -x2 - y <= 0
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2( 1, 2), 2, Vector::Constant(1, -1), 4, 3));// x1 + 2*x2 - y <= 4
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2( 4, 2), 2, Vector::Constant(1, -1), 12, 4));// 4x1 + 2x2 - y <= 12
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2( -1, 1), 2, Vector::Constant(1, -1), 1, 5));// -x1 + x2 - y <= 1
expectedInitLP.inequalities.push_back(LinearInequality(
    1, Vector2(-1, 0), 2, Vector::Constant(1, -1), 0, 1)); // -x1 - y <= 0
expectedInitLP.inequalities.push_back(LinearInequality(
    1, Vector2(0, -1), 2, Vector::Constant(1, -1), 0, 2)); // -x2 - y <= 0
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2(1, 2), 2, Vector::Constant(1, -1), 4,
                     3)); // x1 + 2*x2 - y <= 4
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2(4, 2), 2, Vector::Constant(1, -1), 12,
                     4)); // 4x1 + 2x2 - y <= 12
expectedInitLP.inequalities.push_back(
    LinearInequality(1, Vector2(-1, 1), 2, Vector::Constant(1, -1), 1,
                     5)); // -x1 + x2 - y <= 1
CHECK(assert_equal(expectedInitLP, *initLP, 1e-10));
LPSolver lpSolveInit(*initLP);
VectorValues xy0(x0);
@ -155,56 +174,61 @@ TEST(LPInitSolver, initialization) {
* x + 2y = 6
*/
TEST(LPSolver, overConstrainedLinearSystem) {
GaussianFactorGraph graph;
Matrix A1 = Vector3(1,1,1);
Matrix A2 = Vector3(1,-1,2);
Vector b = Vector3( 1, 5, 6);
JacobianFactor factor(1, A1, 2, A2, b, noiseModel::Constrained::All(3));
graph.push_back(factor);
GaussianFactorGraph graph;
Matrix A1 = Vector3(1, 1, 1);
Matrix A2 = Vector3(1, -1, 2);
Vector b = Vector3(1, 5, 6);
JacobianFactor factor(1, A1, 2, A2, b, noiseModel::Constrained::All(3));
graph.push_back(factor);
VectorValues x = graph.optimize();
// This check confirms that gtsam linear constraint solver can't handle over-constrained system
CHECK(factor.error(x) != 0.0);
VectorValues x = graph.optimize();
// This check confirms that gtsam linear constraint solver can't handle
// over-constrained system
CHECK(factor.error(x) != 0.0);
}
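Written out, the over-determined system assembled above (A1 and A2 are the coefficient columns of variables 1 and 2) is

\[
x_1 + x_2 = 1, \qquad x_1 - x_2 = 5, \qquad x_1 + 2x_2 = 6.
\]

The first two equations force (x1, x2) = (3, -2), but then x1 + 2x2 = -1, not 6, so no exact solution exists and the residual checked above is necessarily nonzero.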
TEST(LPSolver, overConstrainedLinearSystem2) {
GaussianFactorGraph graph;
graph.push_back(JacobianFactor(1, I_1x1, 2, I_1x1, kOne, noiseModel::Constrained::All(1)));
graph.push_back(JacobianFactor(1, I_1x1, 2, -I_1x1, 5*kOne, noiseModel::Constrained::All(1)));
graph.push_back(JacobianFactor(1, I_1x1, 2, 2*I_1x1, 6*kOne, noiseModel::Constrained::All(1)));
VectorValues x = graph.optimize();
// This check confirms that gtsam linear constraint solver can't handle over-constrained system
CHECK(graph.error(x) != 0.0);
GaussianFactorGraph graph;
graph.push_back(JacobianFactor(1, I_1x1, 2, I_1x1, kOne,
noiseModel::Constrained::All(1)));
graph.push_back(JacobianFactor(1, I_1x1, 2, -I_1x1, 5 * kOne,
noiseModel::Constrained::All(1)));
graph.push_back(JacobianFactor(1, I_1x1, 2, 2 * I_1x1, 6 * kOne,
noiseModel::Constrained::All(1)));
VectorValues x = graph.optimize();
// This check confirms that gtsam linear constraint solver can't handle
// over-constrained system
CHECK(graph.error(x) != 0.0);
}
/* ************************************************************************* */
TEST(LPSolver, simpleTest1) {
LP lp = simpleLP1();
LPSolver lpSolver(lp);
VectorValues init;
init.insert(1, Vector::Zero(2));
LP lp = simpleLP1();
LPSolver lpSolver(lp);
VectorValues init;
init.insert(1, Vector::Zero(2));
VectorValues x1 = lpSolver.solveWithCurrentWorkingSet(init,
InequalityFactorGraph());
VectorValues expected_x1;
expected_x1.insert(1, Vector::Ones(2));
CHECK(assert_equal(expected_x1, x1, 1e-10));
VectorValues x1 =
lpSolver.buildWorkingGraph(InequalityFactorGraph(), init).optimize();
VectorValues expected_x1;
expected_x1.insert(1, Vector::Ones(2));
CHECK(assert_equal(expected_x1, x1, 1e-10));
VectorValues result, duals;
boost::tie(result, duals) = lpSolver.optimize(init);
VectorValues expectedResult;
expectedResult.insert(1, Vector2(8./3., 2./3.));
CHECK(assert_equal(expectedResult, result, 1e-10));
VectorValues result, duals;
boost::tie(result, duals) = lpSolver.optimize(init);
VectorValues expectedResult;
expectedResult.insert(1, Vector2(8. / 3., 2. / 3.));
CHECK(assert_equal(expectedResult, result, 1e-10));
}
/* ************************************************************************* */
TEST(LPSolver, testWithoutInitialValues) {
LP lp = simpleLP1();
LPSolver lpSolver(lp);
VectorValues result,duals, expectedResult;
expectedResult.insert(1, Vector2(8./3., 2./3.));
boost::tie(result, duals) = lpSolver.optimize();
CHECK(assert_equal(expectedResult, result));
LP lp = simpleLP1();
LPSolver lpSolver(lp);
VectorValues result, duals, expectedResult;
expectedResult.insert(1, Vector2(8. / 3., 2. / 3.));
boost::tie(result, duals) = lpSolver.optimize();
CHECK(assert_equal(expectedResult, result));
}
/**
@ -215,18 +239,17 @@ CHECK(assert_equal(expectedResult, result));
*/
/* ************************************************************************* */
TEST(LPSolver, LinearCost) {
LinearCost cost(1, Vector3( 2., 4., 6.));
VectorValues x;
x.insert(1, Vector3( 1., 3., 5.));
double error = cost.error(x);
double expectedError = 44.0;
DOUBLES_EQUAL(expectedError, error, 1e-100);
LinearCost cost(1, Vector3(2., 4., 6.));
VectorValues x;
x.insert(1, Vector3(1., 3., 5.));
double error = cost.error(x);
double expectedError = 44.0;
DOUBLES_EQUAL(expectedError, error, 1e-100);
}
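The expected value is consistent with the cost acting linearly on x, i.e. the dot product of the coefficient vector with x (this restates the test's numbers, not LinearCost's internals):

\[
2\cdot 1 + 4\cdot 3 + 6\cdot 5 = 2 + 12 + 30 = 44.
\]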
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */


@ -21,8 +21,6 @@
#include <gtsam_unstable/linear/QPSolver.h>
#include <gtsam_unstable/linear/QPSParser.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam_unstable/linear/InfeasibleInitialValues.h>
#include <gtsam_unstable/linear/InfeasibleOrUnboundedProblem.h>
using namespace std;
using namespace gtsam;
@ -144,7 +142,7 @@ TEST(QPSolver, indentifyActiveConstraints) {
CHECK(workingSet.at(2)->active());// active
CHECK(!workingSet.at(3)->active());// inactive
VectorValues solution = solver.solveWithCurrentWorkingSet(workingSet);
VectorValues solution = solver.buildWorkingGraph(workingSet).optimize();
VectorValues expectedSolution;
expectedSolution.insert(X(1), kZero);
expectedSolution.insert(X(2), kZero);
@ -179,7 +177,8 @@ TEST(QPSolver, iterate) {
InequalityFactorGraph workingSet = solver.identifyActiveConstraints(
qp.inequalities, currentSolution);
QPState state(currentSolution, VectorValues(), workingSet, false, 100);
QPSolver::State state(currentSolution, VectorValues(), workingSet, false,
100);
int it = 0;
while (!state.converged) {


@ -1,77 +0,0 @@
import gtsam.*
g = [-1; -1] % min -x1-x2
C = [-1 0
0 -1
1 2
4 2
-1 1]';
b =[0;0;4;12;1]
%% step 0
m = length(C);
active = zeros(1, m);
x0 = [0;0];
for iter = 1:2
% project -g onto the nullspace of active constraints in C to obtain the moving direction
% It boils down to solving the following constrained linear least squares
% system: min_d// || d// - d ||^2
% s.t. C*d// = 0
% where d = -g, the opposite direction of the objective's gradient vector
dgraph = GaussianFactorGraph
dgraph.push_back(JacobianFactor(1, eye(2), -g, noiseModel.Unit.Create(2)));
for i=1:m
if (active(i)==1)
ci = C(:,i);
dgraph.push_back(JacobianFactor(1, ci', 0, noiseModel.Constrained.All(1)));
end
end
d = dgraph.optimize.at(1)
% Find the bad active constraints and remove them
% TODO: FIXME Is this implementation correct?
for i=1:m
if (active(i) == 1)
ci = C(:,i);
if ci'*d < 0
active(i) = 0;
end
end
end
active
% We are going to jump:
% x1 = x0 + k*d;, k>=0
% So check all inactive constraints that block the jump to find the smallest k
% ci*x1 - bi <=0
% ci*(x0 + k*d) - bi <= 0
% ci*x0-bi + ci*k*d <= 0
% - if ci*d < 0: great, no prob (-ci and d have the same direction)
% - if ci*d > 0: (-ci and d have opposite directions)
% k <= -(ci*x0 - bi)/(ci*d)
k = 100000;
newActive = -1;
for i=1:m
if active(i) == 1
continue
end
ci = C(:,i);
if ci'*d > 0
foundNewActive = true;
if k > -(ci'*x0 - b(i))/(ci'*d)
k = -(ci'*x0 - b(i))/(ci'*d);
newActive = i;
end
end
end
% If there is no blocking constraint, the problem is unbounded
if newActive == -1
disp('Unbounded')
else
% Otherwise, make the jump, and we have a new active constraint
x0 = x0 + k*d
active(newActive) = 1
end
end
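Collecting the script's comments into one expression: with active set \(\mathcal{A}\), the step length it computes before jumping is

\[
k \;=\; \min_{\,i \notin \mathcal{A},\ c_i^\top d > 0} \; \frac{b_i - c_i^\top x_0}{c_i^\top d},
\]

and if no inactive constraint satisfies \(c_i^\top d > 0\), the problem is declared unbounded along d.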