[REFACTOR] Remove deprecated vector initialization calls.

parent b387a08b66
commit c90304398e
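The hunks below replace GTSAM's deprecated free-function initializers (zeros, zero, ones, eye) with the Eigen-style static members already used elsewhere in the library. A minimal sketch of the new style, using plain Eigen typedefs as stand-ins for gtsam::Matrix and gtsam::Vector (an editorial illustration, not part of the commit):

#include <Eigen/Dense>

int main() {
  using Matrix = Eigen::MatrixXd;   // stand-in for gtsam::Matrix
  using Vector = Eigen::VectorXd;   // stand-in for gtsam::Vector

  Matrix G = Matrix::Zero(2, 2);      // was zeros(2, 2)
  Vector g = Vector::Zero(3);         // was zero(3)
  Matrix W = Matrix::Ones(1, 1);      // was ones(1, 1)
  Matrix I = Matrix::Identity(1, 1);  // was eye(1); the diff itself uses GTSAM's I_1x1 constant
  return 0;
}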
@@ -87,19 +87,19 @@ public:
    std::vector<Matrix> G11;
    evaluateHessians(x1, G11);

-   if (dim(lambda) != G11.size()) {
+   if (lambda.size() != G11.size()) {
      throw std::runtime_error(
          "Error in evaluateHessians: the number of returned Gij matrices must be the same as the constraint dimension!");
    }

    // Combine the Lagrange-multiplier part into this constraint factor
-   Matrix lG11sum = zeros(G11[0].rows(), G11[0].cols());
+   Matrix lG11sum = Matrix::Zero(G11[0].rows(), G11[0].cols());
    for (size_t i = 0; i < lambda.rows(); ++i) {
      lG11sum += -lambda[i] * G11[i];
    }

    HessianFactor::shared_ptr hf(new HessianFactor(Base::key(), lG11sum,
-       zero(X1Dim), 100.0));
+       Vector::Zero(X1Dim), 100.0));
    return hf;
  }

@@ -211,15 +211,16 @@ public:
    std::vector<Matrix> G11, G12, G22;
    evaluateHessians(x1, x2, G11, G12, G22);

-   if (dim(lambda) != G11.size() || dim(lambda) != G12.size()
-       || dim(lambda) != G22.size()) {
+   if (lambda.size() != G11.size() || lambda.size() != G12.size()
+       || lambda.size() != G22.size()) {
      throw std::runtime_error(
          "Error in evaluateHessians: the number of returned Gij matrices must be the same as the constraint dimension!");
    }

    // Combine the Lagrange-multiplier part into this constraint factor
-   Matrix lG11sum = zeros(G11[0].rows(), G11[0].cols()), lG12sum = zeros(
-       G12[0].rows(), G12[0].cols()), lG22sum = zeros(G22[0].rows(),
+   Matrix lG11sum = Matrix::Zero(G11[0].rows(), G11[0].cols()), lG12sum =
+       Matrix::Zero(G12[0].rows(), G12[0].cols()), lG22sum = Matrix::Zero(
+       G22[0].rows(),
        G22[0].cols());
    for (size_t i = 0; i < lambda.rows(); ++i) {
      lG11sum += -lambda[i] * G11[i];

@@ -228,7 +229,8 @@ public:
    }

    return boost::make_shared<HessianFactor>(Base::keys_[0], Base::keys_[1],
-       lG11sum, lG12sum, zero(X1Dim), lG22sum, zero(X2Dim), 0.0);
+       lG11sum, lG12sum, Vector::Zero(
+       X1Dim), lG22sum, Vector::Zero(X2Dim), 0.0);
  }

  /** evaluate Hessians for lambda factors */

@@ -368,18 +370,19 @@ public:
    std::vector<Matrix> G11, G12, G13, G22, G23, G33;
    evaluateHessians(x1, x2, x3, G11, G12, G13, G22, G23, G33);

-   if (dim(lambda) != G11.size() || dim(lambda) != G12.size()
-       || dim(lambda) != G13.size() || dim(lambda) != G22.size()
-       || dim(lambda) != G23.size() || dim(lambda) != G33.size()) {
+   if (lambda.size() != G11.size() || lambda.size() != G12.size()
+       || lambda.size() != G13.size() || lambda.size() != G22.size()
+       || lambda.size() != G23.size() || lambda.size() != G33.size()) {
      throw std::runtime_error(
          "Error in evaluateHessians: the number of returned Gij matrices must be the same as the constraint dimension!");
    }

    // Combine the Lagrange-multiplier part into this constraint factor
-   Matrix lG11sum = zeros(G11[0].rows(), G11[0].cols()), lG12sum = zeros(
-       G12[0].rows(), G12[0].cols()), lG13sum = zeros(G13[0].rows(),
-       G13[0].cols()), lG22sum = zeros(G22[0].rows(), G22[0].cols()), lG23sum =
-       zeros(G23[0].rows(), G23[0].cols()), lG33sum = zeros(G33[0].rows(),
+   Matrix lG11sum = Matrix::Zero(G11[0].rows(), G11[0].cols()), lG12sum =
+       Matrix::Zero(G12[0].rows(), G12[0].cols()), lG13sum = Matrix::Zero(
+       G13[0].rows(), G13[0].cols()), lG22sum = Matrix::Zero(G22[0].rows(),
+       G22[0].cols()), lG23sum = Matrix::Zero(G23[0].rows(), G23[0].cols()),
+       lG33sum = Matrix::Zero(G33[0].rows(),
        G33[0].cols());
    for (size_t i = 0; i < lambda.rows(); ++i) {
      lG11sum += -lambda[i] * G11[i];

@@ -392,8 +395,8 @@ public:

    return boost::shared_ptr<HessianFactor>(
        new HessianFactor(Base::keys_[0], Base::keys_[1], Base::keys_[2],
-           lG11sum, lG12sum, lG13sum, zero(X1Dim), lG22sum, lG23sum,
-           zero(X2Dim), lG33sum, zero(X3Dim), 0.0));
+           lG11sum, lG12sum, lG13sum, Vector::Zero(X1Dim), lG22sum, lG23sum,
+           Vector::Zero(X2Dim), lG33sum, Vector::Zero(X3Dim), 0.0));
  }

  /**

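The logic kept unchanged in the hunks above accumulates the Lagrange-multiplier-weighted sum of constraint Hessian blocks, lGsum = -Σ_i λ_i · G_i, before packing it into a HessianFactor. A standalone sketch of that accumulation in plain Eigen (hypothetical helper name, not the GTSAM factor API):

#include <Eigen/Dense>
#include <stdexcept>
#include <vector>

using Matrix = Eigen::MatrixXd;   // stand-in for gtsam::Matrix
using Vector = Eigen::VectorXd;   // stand-in for gtsam::Vector

// Hypothetical helper mirroring the accumulation loop above:
// sum up -lambda[i] * G[i] over all constraint rows.
Matrix combineLagrangeHessians(const Vector& lambda, const std::vector<Matrix>& G) {
  if (static_cast<size_t>(lambda.size()) != G.size())
    throw std::runtime_error(
        "the number of Gij matrices must be the same as the constraint dimension");
  Matrix sum = Matrix::Zero(G[0].rows(), G[0].cols());
  for (Eigen::Index i = 0; i < lambda.size(); ++i)
    sum += -lambda[i] * G[i];
  return sum;
}
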
@@ -51,9 +51,9 @@ public:
      boost::optional<Matrix&> H1 = boost::none, boost::optional<Matrix&> H2 =
          boost::none) const {
    if (H1)
-     *H1 = eye(1);
+     *H1 = I_1x1;
    if (H2)
-     *H2 = eye(1);
+     *H2 = I_1x1;
    return (Vector(1) << x + y - 1.0).finished();
  }
};

@@ -65,11 +65,11 @@ TEST(testlcnlpSolver, QPProblem) {
  // Note the Hessian encodes:
  // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
  // Hence here we have G11 = 2, G12 = 0, G22 = 2, g1 = 0, g2 = 0, f = 0
- HessianFactor hf(X(1), Y(1), 2.0 * ones(1,1), zero(1), zero(1),
-     2*ones(1,1), zero(1) , 0);
+ HessianFactor hf(X(1), Y(1), 2.0 * Matrix::Ones(1,1), Vector::Zero(1), Vector::Zero(1),
+     2*Matrix::Ones(1,1), Vector::Zero(1) , 0);

  EqualityFactorGraph equalities;
- LinearEquality linearConstraint(X(1), ones(1), Y(1), ones(1), 1*ones(1), dualKey);// x + y - 1 = 0
+ LinearEquality linearConstraint(X(1), Vector::Ones(1), Y(1), Vector::Ones(1), Vector::Ones(1), dualKey);// x + y - 1 = 0
  equalities.push_back(linearConstraint);

  // Compare against QP
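Worked check of the encoding noted above (editorial, not part of the diff): with G11 = G22 = 2, G12 = 0, g1 = g2 = 0 and f = 0, the factor represents 0.5·2·x² + 0.5·2·y² = x² + y². Minimizing x² + y² subject to x + y - 1 = 0 yields x = y = 0.5, which is the point at which the LCNLP result and the QP solver's expectedSolution below should agree.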
@@ -81,15 +81,15 @@ TEST(testlcnlpSolver, QPProblem) {
  QPSolver qpSolver(qp);
  // create initial values for optimization
  VectorValues initialVectorValues;
- initialVectorValues.insert(X(1), zero(1));
- initialVectorValues.insert(Y(1), ones(1));
+ initialVectorValues.insert(X(1), Vector::Zero(1));
+ initialVectorValues.insert(Y(1), Vector::Ones(1));
  VectorValues expectedSolution = qpSolver.optimize(initialVectorValues).first;

  //Instantiate LinearConstraintNLP
  LinearConstraintNLP lcnlp;
  Values linPoint;
- linPoint.insert<Vector1>(X(1), zero(1));
- linPoint.insert<Vector1>(Y(1), zero(1));
+ linPoint.insert<Vector1>(X(1), Vector::Zero(1));
+ linPoint.insert<Vector1>(Y(1), Vector::Zero(1));
  lcnlp.cost.add(LinearContainerFactor(hf, linPoint));// wrap it using linearcontainerfactor
  lcnlp.linearEqualities.add(ConstraintProblem1(X(1), Y(1), dualKey));

@@ -121,7 +121,7 @@ public:
      boost::none) const {
    if (H)
      *H =
-         (Matrix(1, 6) << zeros(1, 3), pose.rotation().matrix().row(0)).finished();
+         (Matrix(1, 6) << Matrix::Zero(1, 3), pose.rotation().matrix().row(0)).finished();
    return (Vector(1) << pose.x()).finished();
  }
};

@@ -161,9 +161,9 @@ public:
      boost::optional<Matrix&> H1 = boost::none, boost::optional<Matrix&> H2 =
          boost::none) const {
    if (H1)
-     *H1 = eye(1);
+     *H1 = I_1x1;
    if (H2)
-     *H2 = eye(1);
+     *H2 = I_1x1;
    return x + y - 1.0;
  }
};

@@ -175,11 +175,11 @@ TEST(testlcnlpSolver, inequalityConstraint) {
  // Note the Hessian encodes:
  // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
  // Hence here we have G11 = 2, G12 = 0, G22 = 2, g1 = 0, g2 = 0, f = 0
- HessianFactor hf(X(1), Y(1), 2.0 * ones(1,1), zero(1), zero(1),
-     2*ones(1,1), zero(1) , 0);
+ HessianFactor hf(X(1), Y(1), 2.0 * Matrix::Ones(1,1), Vector::Zero(1), Vector::Zero(1),
+     2*Matrix::Ones(1,1), Vector::Zero(1) , 0);

  InequalityFactorGraph inequalities;
- LinearInequality linearConstraint(X(1), ones(1), Y(1), ones(1), 1.0, dualKey);// x + y - 1 <= 0
+ LinearInequality linearConstraint(X(1), Vector::Ones(1), Y(1), Vector::Ones(1), 1.0, dualKey);// x + y - 1 <= 0
  inequalities.push_back(linearConstraint);

  // Compare against QP

@@ -191,15 +191,15 @@ TEST(testlcnlpSolver, inequalityConstraint) {
  QPSolver qpSolver(qp);
  // create initial values for optimization
  VectorValues initialVectorValues;
- initialVectorValues.insert(X(1), zero(1));
- initialVectorValues.insert(Y(1), zero(1));
+ initialVectorValues.insert(X(1), Vector::Zero(1));
+ initialVectorValues.insert(Y(1), Vector::Zero(1));
  VectorValues expectedSolution = qpSolver.optimize(initialVectorValues).first;

  //Instantiate LinearConstraintNLP
  LinearConstraintNLP lcnlp;
  Values linPoint;
- linPoint.insert<Vector1>(X(1), zero(1));
- linPoint.insert<Vector1>(Y(1), zero(1));
+ linPoint.insert<Vector1>(X(1), Vector::Zero(1));
+ linPoint.insert<Vector1>(Y(1), Vector::Zero(1));
  lcnlp.cost.add(LinearContainerFactor(hf, linPoint));// wrap it using linearcontainerfactor
  lcnlp.linearInequalities.add(InequalityProblem1(X(1), Y(1), dualKey));


@@ -62,11 +62,11 @@ public:
      const LieScalar& x3, std::vector<Matrix>& G11, std::vector<Matrix>& G12,
      std::vector<Matrix>& G13, std::vector<Matrix>& G22,
      std::vector<Matrix>& G23, std::vector<Matrix>& G33) const {
-   G11.push_back(zeros(1, 1));
-   G12.push_back(zeros(1, 1));
-   G13.push_back(zeros(1, 1));
+   G11.push_back(Matrix::Zero(1, 1));
+   G12.push_back(Matrix::Zero(1, 1));
+   G13.push_back(Matrix::Zero(1, 1));
    G22.push_back((Matrix(1, 1) << 2.0).finished());
-   G23.push_back(zeros(1, 1));
+   G23.push_back(Matrix::Zero(1, 1));
    G33.push_back((Matrix(1, 1) << 6.0 * x3.value()).finished());
  }
};

@@ -126,20 +126,20 @@ public:
      std::vector<Matrix>& G11, std::vector<Matrix>& G12,
      std::vector<Matrix>& G13, std::vector<Matrix>& G22,
      std::vector<Matrix>& G23, std::vector<Matrix>& G33) const {
-   G11.push_back(zeros(2, 2));
+   G11.push_back(Matrix::Zero(2, 2));
    G11.push_back((Matrix(2,2) << 2.0, 0.0, 0.0, 0.0).finished());

-   G12.push_back(zeros(2,2));
-   G12.push_back(zeros(2,2));
+   G12.push_back(Matrix::Zero(2, 2));
+   G12.push_back(Matrix::Zero(2, 2));

-   G13.push_back(zeros(2,2));
-   G13.push_back(zeros(2,2));
+   G13.push_back(Matrix::Zero(2, 2));
+   G13.push_back(Matrix::Zero(2, 2));

    G22.push_back((Matrix(2,2) << 2.0, 0.0, 0.0, 0.0).finished());
-   G22.push_back(zeros(2,2));
+   G22.push_back(Matrix::Zero(2, 2));

-   G23.push_back(zeros(2, 2));
-   G23.push_back(zeros(2, 2));
+   G23.push_back(Matrix::Zero(2, 2));
+   G23.push_back(Matrix::Zero(2, 2));

    G33.push_back((Matrix(2, 2) << 0.0, 0.0, 0.0, 6.0 * x3.y() ).finished());
    G33.push_back((Matrix(2, 2) << 6.0 * x3.x(), 0.0, 0.0, 0.0 ).finished());