diff --git a/.settings/org.eclipse.cdt.core.prefs b/.settings/org.eclipse.cdt.core.prefs new file mode 100644 index 000000000..b700d0a81 --- /dev/null +++ b/.settings/org.eclipse.cdt.core.prefs @@ -0,0 +1,6 @@ +eclipse.preferences.version=1 +environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/PATH/delimiter=\: +environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/PATH/operation=append +environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/PATH/value=$PATH\:/opt/local/bin +environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/append=true +environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/appendContributed=true diff --git a/CppUnitLite/Test.h b/CppUnitLite/Test.h index 3ea6a6318..ff1f1b692 100644 --- a/CppUnitLite/Test.h +++ b/CppUnitLite/Test.h @@ -80,6 +80,12 @@ protected: testGroup##testName##Instance; \ void testGroup##testName##Test::run (TestResult& result_) +/** + * Use this to disable unwanted tests without commenting them out. + */ +#define TEST_DISABLED(testGroup, testName)\ + void testGroup##testName##Test(TestResult& result_, const std::string& name_) + /* * Convention for tests: * - "EXPECT" is a test that will not end execution of the series of tests diff --git a/gtsam.h b/gtsam.h index 56bf378b5..cd540e39d 100644 --- a/gtsam.h +++ b/gtsam.h @@ -1224,6 +1224,7 @@ class VectorValues { #include virtual class GaussianFactor { + gtsam::KeyVector keys() const; void print(string s) const; bool equals(const gtsam::GaussianFactor& lf, double tol) const; double error(const gtsam::VectorValues& c) const; @@ -1647,6 +1648,7 @@ class NonlinearFactorGraph { void push_back(gtsam::NonlinearFactor* factor); void add(gtsam::NonlinearFactor* factor); bool exists(size_t idx) const; + gtsam::KeySet keys() const; // NonlinearFactorGraph double error(const gtsam::Values& values) const; diff --git a/gtsam/base/Vector.cpp b/gtsam/base/Vector.cpp index b872aa08b..a9ef8fe10 100644 --- a/gtsam/base/Vector.cpp +++ b/gtsam/base/Vector.cpp @@ -286,11 +286,15 @@ double weightedPseudoinverse(const Vector& a, const Vector& weights, for (size_t i = 0; i < m; ++i) isZero.push_back(fabs(a[i]) < 1e-9); // If there is a valid (a!=0) constraint (sigma==0) return the first one - for (size_t i = 0; i < m; ++i) + for (size_t i = 0; i < m; ++i) { if (weights[i] == inf && !isZero[i]) { + // Basically, instead of doing a normal QR step with the weighted + // pseudoinverse, we enforce the constraint by turning + // ax + AS = b into x + (A/a)S = b/a, for the first row where a!=0 pseudo = delta(m, i, 1 / a[i]); return inf; } + } // Form psuedo-inverse inv(a'inv(Sigma)a)a'inv(Sigma) // For diagonal Sigma, inv(Sigma) = diag(precisions) diff --git a/gtsam/geometry/Point2.h b/gtsam/geometry/Point2.h index 5dabc4bd6..fadea652b 100644 --- a/gtsam/geometry/Point2.h +++ b/gtsam/geometry/Point2.h @@ -173,6 +173,16 @@ public: /// Log map around identity - just return the Point2 as a vector static inline Vector2 Logmap(const Point2& dp) { return Vector2(dp.x(), dp.y()); } + /// Left-trivialized derivative of the exponential map + static Matrix dexpL(const Vector2& v) { + return eye(2); + } + + /// Left-trivialized derivative inverse of the exponential map + static Matrix dexpInvL(const Vector2& v) { + return eye(2); + } + /// @} /// @name Vector Space /// @{ diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index edc16af03..bea2c54c2 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -123,6 
+123,62 @@ Matrix3 Pose2::AdjointMap() const { return rvalue; } +/* ************************************************************************* */ +Matrix3 Pose2::adjointMap(const Vector& v) { + // See Chirikjian12book2, vol.2, pg. 36 + Matrix3 ad = zeros(3,3); + ad(0,1) = -v[2]; + ad(1,0) = v[2]; + ad(0,2) = v[1]; + ad(1,2) = -v[0]; + return ad; +} + +/* ************************************************************************* */ +Matrix3 Pose2::dexpL(const Vector3& v) { + // See Iserles05an, pg. 33. + // TODO: Duplicated code. Maybe unify them at higher Lie level? + static const int N = 10; // order of approximation + Matrix3 res = I_3x3; + Matrix3 ad_i = I_3x3; + Matrix3 ad = adjointMap(v); + double fac = 1.0; + for (int i = 1; i < N; ++i) { + ad_i = ad * ad_i; + fac = fac * (i+1); + // Since this is the left-trivialized version, we flip the signs of the odd terms + if (i%2 != 0) + res = res - 1.0 / fac * ad_i; + else + res = res + 1.0 / fac * ad_i; + } + return res; +} + +/* ************************************************************************* */ +Matrix3 Pose2::dexpInvL(const Vector3& v) { + // TODO: Duplicated code with Pose3. Maybe unify them at higher Lie level? + // Bernoulli numbers, from Wikipedia + static const Vector B = (Vector(9) << 1.0, -1.0 / 2.0, 1. / 6., 0.0, -1.0 / 30.0, + 0.0, 1.0 / 42.0, 0.0, -1.0 / 30).finished(); + static const int N = 5; // order of approximation + Matrix3 res = I_3x3; + Matrix3 ad_i = I_3x3; + Matrix3 ad = adjointMap(v); + double fac = 1.0; + for (int i = 1; i < N; ++i) { + ad_i = ad * ad_i; + fac = fac * i; + // Since this is the left-trivialized version, we flip the signs of the odd terms + // Note that all Bernoulli odd numbers are 0, except 1. + if (i==1) + res = res - B(i) / fac * ad_i; + else + res = res + B(i) / fac * ad_i; + } + return res; +} + /* ************************************************************************* */ Pose2 Pose2::inverse(OptionalJacobian<3,3> H1) const { if (H1) *H1 = -AdjointMap(); diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index a48be51cf..ba85ed0ac 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -161,6 +161,11 @@ public: return AdjointMap()*xi; } + /** + * Compute the [ad(w,v)] operator for SE2 as in [Kobilarov09siggraph], pg 19 + */ + static Matrix3 adjointMap(const Vector& v); + /** * wedge for SE(2): * @param xi 3-dim twist (v,omega) where @@ -176,6 +181,13 @@ public: return m; } + /// Left-trivialized derivative of the exponential map + static Matrix3 dexpL(const Vector3& v); + + /// Left-trivialized derivative inverse of the exponential map + static Matrix3 dexpInvL(const Vector3& v); + + /// @} /// @name Group Action on Point2 /// @{ diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index 6a7ed8456..805bc6012 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -213,6 +213,7 @@ public: * and in [Hairer06book] in formula (4.5), pg. 84, Lemma 4.2 * Ernst Hairer, et al., Geometric Numerical Integration, * Structure-Preserving Algorithms for Ordinary Differential Equations, 2nd edition, Springer-Verlag, 2006. + * See also Iserles05an, pg. 
33, formula 2.46 */ static Matrix6 dExpInv_exp(const Vector6 &xi); diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index d5ea6afdc..21a402426 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -175,6 +175,16 @@ namespace gtsam { return v; } + /// Left-trivialized derivative of the exponential map + static Matrix dexpL(const Vector& v) { + return ones(1); + } + + /// Left-trivialized derivative inverse of the exponential map + static Matrix dexpInvL(const Vector& v) { + return ones(1); + } + /// @} /// @name Group Action on Point2 /// @{ diff --git a/gtsam/geometry/tests/testPose2.cpp b/gtsam/geometry/tests/testPose2.cpp index f12d96899..5497f6e28 100644 --- a/gtsam/geometry/tests/testPose2.cpp +++ b/gtsam/geometry/tests/testPose2.cpp @@ -32,7 +32,7 @@ using namespace boost::assign; using namespace gtsam; using namespace std; -// #define SLOW_BUT_CORRECT_EXPMAP +//#define SLOW_BUT_CORRECT_EXPMAP GTSAM_CONCEPT_TESTABLE_INST(Pose2) GTSAM_CONCEPT_LIE_INST(Pose2) @@ -143,7 +143,7 @@ TEST(Pose2, expmap0d) { // test case for screw motion in the plane namespace screw { double w=0.3; - Vector xi = (Vector(3) << 0.0, w, w); + Vector xi = (Vector(3) << 0.0, w, w).finished(); Rot2 expectedR = Rot2::fromAngle(w); Point2 expectedT(-0.0446635, 0.29552); Pose2 expected(expectedR, expectedT); @@ -192,6 +192,28 @@ TEST(Pose2, logmap_full) { EXPECT(assert_equal(expected, actual, 1e-5)); } +/* ************************************************************************* */ +Vector w = (Vector(3) << 0.1, 0.27, -0.2).finished(); + +// Left trivialization Derivative of exp(w) over w: How exp(w) changes when w changes? +// We find y such that: exp(w) exp(y) = exp(w + dw) for dw --> 0 +// => y = log (exp(-w) * exp(w+dw)) +Vector3 testDexpL(const Vector& dw) { + Vector3 y = Pose2::Logmap(Pose2::Expmap(-w) * Pose2::Expmap(w + dw)); + return y; +} + +TEST( Pose2, dexpL) { + Matrix actualDexpL = Pose2::dexpL(w); + Matrix expectedDexpL = numericalDerivative11( + boost::function( + boost::bind(testDexpL, _1)), Vector(zero(3)), 1e-2); + EXPECT(assert_equal(expectedDexpL, actualDexpL, 1e-5)); + + Matrix actualDexpInvL = Pose2::dexpInvL(w); + EXPECT(assert_equal(expectedDexpL.inverse(), actualDexpInvL, 1e-5)); +} + /* ************************************************************************* */ static Point2 transform_to_proxy(const Pose2& pose, const Point2& point) { return pose.transform_to(point); diff --git a/gtsam/geometry/tests/testRot2.cpp b/gtsam/geometry/tests/testRot2.cpp index cfb103c5d..2b9e5c046 100644 --- a/gtsam/geometry/tests/testRot2.cpp +++ b/gtsam/geometry/tests/testRot2.cpp @@ -155,6 +155,28 @@ TEST( Rot2, relativeBearing ) CHECK(assert_equal(expectedH,actualH)); } +/* ************************************************************************* */ +Vector w = (Vector(1) << 0.27).finished(); + +// Left trivialization Derivative of exp(w) over w: How exp(w) changes when w changes? 
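For reference, the two truncated series computed by the new Pose2::dexpL and Pose2::dexpInvL above can be written compactly (with \(\mathrm{ad}_v\) = adjointMap(v) and \(B_k\) the Bernoulli numbers, matching the loops in Pose2.cpp):

\[
\mathrm{dexp}_L(v) \;=\; \sum_{k\ge 0} \frac{(-1)^k}{(k+1)!}\,\mathrm{ad}_v^{\,k} \;=\; I - \tfrac{1}{2}\,\mathrm{ad}_v + \tfrac{1}{6}\,\mathrm{ad}_v^{2} - \cdots,
\qquad
\mathrm{dexp}_L^{-1}(v) \;=\; \sum_{k\ge 0} \frac{B_k}{k!}\,\bigl(-\mathrm{ad}_v\bigr)^{k} \;=\; I + \tfrac{1}{2}\,\mathrm{ad}_v + \tfrac{1}{12}\,\mathrm{ad}_v^{2} + \cdots
\]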
+// We find y such that: exp(w) exp(y) = exp(w + dw) for dw --> 0 +// => y = log (exp(-w) * exp(w+dw)) +Vector1 testDexpL(const Vector& dw) { + Vector1 y = Rot2::Logmap(Rot2::Expmap(-w) * Rot2::Expmap(w + dw)); + return y; +} + +TEST( Rot2, dexpL) { + Matrix actualDexpL = Rot2::dexpL(w); + Matrix expectedDexpL = numericalDerivative11( + boost::function( + boost::bind(testDexpL, _1)), Vector(zero(1)), 1e-2); + EXPECT(assert_equal(expectedDexpL, actualDexpL, 1e-5)); + + Matrix actualDexpInvL = Rot2::dexpInvL(w); + EXPECT(assert_equal(expectedDexpL.inverse(), actualDexpInvL, 1e-5)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/linear/GaussianFactor.h b/gtsam/linear/GaussianFactor.h index 8c8e2cb2b..d7a793d50 100644 --- a/gtsam/linear/GaussianFactor.h +++ b/gtsam/linear/GaussianFactor.h @@ -133,6 +133,9 @@ namespace gtsam { /// A'*b for Jacobian, eta for Hessian (raw memory version) virtual void gradientAtZero(double* d) const = 0; + /// Gradient wrt a key at any values + virtual Vector gradient(Key key, const VectorValues& x) const = 0; + private: /** Serialization function */ friend class boost::serialization::access; diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp index 54e721cd7..2879e0a4c 100644 --- a/gtsam/linear/GaussianFactorGraph.cpp +++ b/gtsam/linear/GaussianFactorGraph.cpp @@ -254,6 +254,7 @@ namespace gtsam { map GaussianFactorGraph::hessianBlockDiagonal() const { map blocks; BOOST_FOREACH(const sharedFactor& factor, *this) { + if (!factor) continue; map BD = factor->hessianBlockDiagonal(); map::const_iterator it = BD.begin(); for(;it!=BD.end();it++) { @@ -303,6 +304,7 @@ namespace gtsam { // Zero-out the gradient VectorValues g; BOOST_FOREACH(const sharedFactor& factor, *this) { + if (!factor) continue; VectorValues gi = factor->gradientAtZero(); g.addInPlace_(gi); } @@ -393,15 +395,15 @@ namespace gtsam { /* ************************************************************************* */ // x += alpha*A'*e -void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e, - VectorValues& x) const { - // For each factor add the gradient contribution - Errors::const_iterator ei = e.begin(); - BOOST_FOREACH(const sharedFactor& Ai_G, *this) { - JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G); - Ai->transposeMultiplyAdd(alpha, *(ei++), x); + void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e, + VectorValues& x) const { + // For each factor add the gradient contribution + Errors::const_iterator ei = e.begin(); + BOOST_FOREACH(const sharedFactor& Ai_G, *this) { + JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G); + Ai->transposeMultiplyAdd(alpha, *(ei++), x); + } } -} ///* ************************************************************************* */ //void residual(const GaussianFactorGraph& fg, const VectorValues &x, VectorValues &r) { diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp index f282682b3..6d7f3c2e1 100644 --- a/gtsam/linear/HessianFactor.cpp +++ b/gtsam/linear/HessianFactor.cpp @@ -616,6 +616,30 @@ void HessianFactor::gradientAtZero(double* d) const { } } +/* ************************************************************************* */ +Vector HessianFactor::gradient(Key key, const VectorValues& x) const { + Factor::const_iterator i = find(key); + // Sum over G_ij*xj for all xj connecting to xi + Vector b = zero(x.at(key).size()); + for 
(Factor::const_iterator j = begin(); j != end(); ++j) { + // Obtain Gij from the Hessian factor + // Hessian factor only stores an upper triangular matrix, so be careful when i>j + Matrix Gij; + if (i > j) { + Matrix Gji = info(j, i); + Gij = Gji.transpose(); + } + else { + Gij = info(i, j); + } + // Accumulate Gij*xj to gradf + b += Gij * x.at(*j); + } + // Subtract the linear term gi + b += -linearTerm(i); + return b; +} + /* ************************************************************************* */ std::pair, boost::shared_ptr > EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys) diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h index 62d84925c..8af5277e2 100644 --- a/gtsam/linear/HessianFactor.h +++ b/gtsam/linear/HessianFactor.h @@ -389,6 +389,12 @@ namespace gtsam { virtual void gradientAtZero(double* d) const; + /** + * Compute the gradient at a key: + * \grad f(x_i) = \sum_j G_ij*x_j - g_i + */ + Vector gradient(Key key, const VectorValues& x) const; + /** * Densely partially eliminate with Cholesky factorization. JacobianFactors are * left-multiplied with their transpose to form the Hessian using the conversion constructor diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index a63bbf473..566a98fc2 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -575,7 +575,14 @@ VectorValues JacobianFactor::gradientAtZero() const { /* ************************************************************************* */ void JacobianFactor::gradientAtZero(double* d) const { - //throw std::runtime_error("gradientAtZero not implemented for Jacobian factor"); + throw std::runtime_error("Raw memory version of gradientAtZero not implemented for Jacobian factor"); +} + +/* ************************************************************************* */ +Vector JacobianFactor::gradient(Key key, const VectorValues& x) const { + // TODO: optimize it for JacobianFactor without converting to a HessianFactor + HessianFactor hessian(*this); + return hessian.gradient(key, x); } /* ************************************************************************* */ diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h index 92f43b6be..d33c5e07c 100644 --- a/gtsam/linear/JacobianFactor.h +++ b/gtsam/linear/JacobianFactor.h @@ -230,7 +230,9 @@ namespace gtsam { virtual bool empty() const { return size() == 0 /*|| rows() == 0*/; } /** is noise model constrained ? */ - bool isConstrained() const { return model_->isConstrained(); } + bool isConstrained() const { + return model_ && model_->isConstrained(); + } /** Return the dimension of the variable pointed to by the given key iterator * todo: Remove this in favor of keeping track of dimensions with variables? @@ -288,9 +290,12 @@ namespace gtsam { /// A'*b for Jacobian VectorValues gradientAtZero() const; - /* ************************************************************************* */ + /// A'*b for Jacobian (raw memory version) virtual void gradientAtZero(double* d) const; + /// Compute the gradient wrt a key at any values + Vector gradient(Key key, const VectorValues& x) const; + /** Return a whitened version of the factor, i.e. with unit diagonal noise model. 
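A short justification for why JacobianFactor::gradient above can simply convert to a HessianFactor (a sketch, written for the noise-model-weighted cost the factor represents): a Jacobian factor encodes \(f(x) = \tfrac12\|Ax-b\|^2_{\Sigma}\), so its gradient with respect to the block \(x_i\) is

\[
\nabla_{x_i} f \;=\; A_i^{\mathsf T}\Sigma^{-1}(Ax - b)
\;=\; \sum_j \underbrace{A_i^{\mathsf T}\Sigma^{-1} A_j}_{G_{ij}}\,x_j \;-\; \underbrace{A_i^{\mathsf T}\Sigma^{-1} b}_{g_i},
\]

which is exactly the quantity \(\sum_j G_{ij}x_j - g_i\) accumulated by HessianFactor::gradient, since the conversion constructor forms \(G = A^{\mathsf T}\Sigma^{-1}A\) and \(g = A^{\mathsf T}\Sigma^{-1}b\).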
*/ JacobianFactor whiten() const; diff --git a/gtsam/linear/tests/testHessianFactor.cpp b/gtsam/linear/tests/testHessianFactor.cpp index cba65580e..3a2cd0fd4 100644 --- a/gtsam/linear/tests/testHessianFactor.cpp +++ b/gtsam/linear/tests/testHessianFactor.cpp @@ -449,6 +449,32 @@ TEST(HessianFactor, gradientAtZero) EXPECT(assert_equal(expectedG, actualG)); } +/* ************************************************************************* */ +TEST(HessianFactor, gradient) +{ + Matrix G11 = (Matrix(1, 1) << 1).finished(); + Matrix G12 = (Matrix(1, 2) << 0, 0).finished(); + Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1).finished(); + Vector g1 = (Vector(1) << -7).finished(); + Vector g2 = (Vector(2) << -8, -9).finished(); + double f = 194; + + HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f); + + // test gradient + Vector x0 = (Vector(1) << 3.0).finished(); + Vector x1 = (Vector(2) << -3.5, 7.1).finished(); + VectorValues x = pair_list_of(0, x0) (1, x1); + + Vector expectedGrad0 = (Vector(1) << 10.0).finished(); + Vector expectedGrad1 = (Vector(2) << 4.5, 16.1).finished(); + Vector grad0 = factor.gradient(0, x); + Vector grad1 = factor.gradient(1, x); + + EXPECT(assert_equal(expectedGrad0, grad0)); + EXPECT(assert_equal(expectedGrad1, grad1)); +} + /* ************************************************************************* */ TEST(HessianFactor, hessianDiagonal) { diff --git a/gtsam/slam/RegularImplicitSchurFactor.h b/gtsam/slam/RegularImplicitSchurFactor.h index ecf8adfec..243331dc1 100644 --- a/gtsam/slam/RegularImplicitSchurFactor.h +++ b/gtsam/slam/RegularImplicitSchurFactor.h @@ -453,6 +453,12 @@ public: } } + /// Gradient wrt a key at any values + Vector gradient(Key key, const VectorValues& x) const { + throw std::runtime_error("gradient for RegularImplicitSchurFactor is not implemented yet"); + } + + }; // RegularImplicitSchurFactor diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt index cd42b8d4f..6abfe4336 100644 --- a/gtsam_unstable/CMakeLists.txt +++ b/gtsam_unstable/CMakeLists.txt @@ -3,6 +3,7 @@ set (gtsam_unstable_subdirs base geometry + linear discrete dynamics nonlinear @@ -47,6 +48,7 @@ endforeach(subdir) set(gtsam_unstable_srcs ${base_srcs} ${geometry_srcs} + ${linear_srcs} ${discrete_srcs} ${dynamics_srcs} ${nonlinear_srcs} diff --git a/gtsam_unstable/linear/CMakeLists.txt b/gtsam_unstable/linear/CMakeLists.txt new file mode 100644 index 000000000..99a4b814e --- /dev/null +++ b/gtsam_unstable/linear/CMakeLists.txt @@ -0,0 +1,6 @@ +# Install headers +file(GLOB linear_headers "*.h") +install(FILES ${linear_headers} DESTINATION include/gtsam_unstable/linear) + +# Add all tests +add_subdirectory(tests) diff --git a/gtsam_unstable/linear/LinearEquality.h b/gtsam_unstable/linear/LinearEquality.h new file mode 100644 index 000000000..e6fec98b7 --- /dev/null +++ b/gtsam_unstable/linear/LinearEquality.h @@ -0,0 +1,120 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearEquality.h + * @brief: LinearEquality derived from Base with constrained noise model + * @date: Nov 27, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include + +namespace gtsam { + +/** + * This class defines Linear constraints by inherit Base + * with the special Constrained noise model + */ +class LinearEquality: public JacobianFactor { +public: + typedef LinearEquality This; ///< Typedef to this class + typedef JacobianFactor Base; ///< Typedef to base class + typedef boost::shared_ptr shared_ptr; ///< shared_ptr to this class + +private: + Key dualKey_; + +public: + /** default constructor for I/O */ + LinearEquality() : + Base() { + } + + /** Conversion from HessianFactor (does Cholesky to obtain Jacobian matrix) */ + explicit LinearEquality(const HessianFactor& hf) { + throw std::runtime_error("Cannot convert HessianFactor to LinearEquality"); + } + + /** Construct unary factor */ + LinearEquality(Key i1, const Matrix& A1, const Vector& b, Key dualKey) : + Base(i1, A1, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Construct binary factor */ + LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, + const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, b, noiseModel::Constrained::All(b.rows())), dualKey_( + dualKey) { + } + + /** Construct ternary factor */ + LinearEquality(Key i1, const Matrix& A1, Key i2, const Matrix& A2, Key i3, + const Matrix& A3, const Vector& b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, b, noiseModel::Constrained::All(b.rows())), dualKey_( + dualKey) { + } + + /** Construct an n-ary factor + * @tparam TERMS A container whose value type is std::pair, specifying the + * collection of keys and matrices making up the factor. */ + template + LinearEquality(const TERMS& terms, const Vector& b, Key dualKey) : + Base(terms, b, noiseModel::Constrained::All(b.rows())), dualKey_(dualKey) { + } + + /** Virtual destructor */ + virtual ~LinearEquality() { + } + + /** equals */ + virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const { + return Base::equals(lf, tol); + } + + /** print */ + virtual void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const { + Base::print(s, formatter); + } + + /** Clone this LinearEquality */ + virtual GaussianFactor::shared_ptr clone() const { + return boost::static_pointer_cast( + boost::make_shared(*this)); + } + + /// dual key + Key dualKey() const { return dualKey_; } + + /// for active set method: equality constraints are always active + bool active() const { return true; } + + /** Special error_vector for constraints (A*x-b) */ + Vector error_vector(const VectorValues& c) const { + return unweighted_error(c); + } + + /** Special error for constraints. + * I think it should be zero, as this function is meant for objective cost. + * But the name "error" can be misleading. + * TODO: confirm with Frank!! 
*/ + virtual double error(const VectorValues& c) const { + return 0.0; + } + +}; +// LinearEquality + +}// gtsam + diff --git a/gtsam_unstable/linear/LinearEqualityFactorGraph.h b/gtsam_unstable/linear/LinearEqualityFactorGraph.h new file mode 100644 index 000000000..fd9191250 --- /dev/null +++ b/gtsam_unstable/linear/LinearEqualityFactorGraph.h @@ -0,0 +1,32 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearEqualityFactorGraph.h + * @brief: Factor graph of all LinearEquality factors + * @date: Dec 8, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include +#include + +namespace gtsam { + +class LinearEqualityFactorGraph : public FactorGraph { +public: + typedef boost::shared_ptr shared_ptr; +}; + +} // namespace gtsam + diff --git a/gtsam_unstable/linear/LinearInequality.h b/gtsam_unstable/linear/LinearInequality.h new file mode 100644 index 000000000..24ad8545c --- /dev/null +++ b/gtsam_unstable/linear/LinearInequality.h @@ -0,0 +1,143 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearInequality.h + * @brief: LinearInequality derived from Base with constrained noise model + * @date: Nov 27, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include + +namespace gtsam { + +typedef Eigen::RowVectorXd RowVector; + +/** + * This class defines Linear constraints by inherit Base + * with the special Constrained noise model + */ +class LinearInequality: public JacobianFactor { +public: + typedef LinearInequality This; ///< Typedef to this class + typedef JacobianFactor Base; ///< Typedef to base class + typedef boost::shared_ptr shared_ptr; ///< shared_ptr to this class + +private: + Key dualKey_; + bool active_; + +public: + /** default constructor for I/O */ + LinearInequality() : + Base(), active_(true) { + } + + /** Conversion from HessianFactor (does Cholesky to obtain Jacobian matrix) */ + explicit LinearInequality(const HessianFactor& hf) { + throw std::runtime_error( + "Cannot convert HessianFactor to LinearInequality"); + } + + /** Construct unary factor */ + LinearInequality(Key i1, const RowVector& A1, double b, Key dualKey) : + Base(i1, A1, (Vector(1) << b).finished(), noiseModel::Constrained::All(1)), dualKey_( + dualKey), active_(true) { + } + + /** Construct binary factor */ + LinearInequality(Key i1, const RowVector& A1, Key i2, const RowVector& A2, double b, + Key dualKey) : + Base(i1, A1, i2, A2, (Vector(1) << b).finished(), noiseModel::Constrained::All(1)), dualKey_( + dualKey), active_(true) { + } + + /** Construct ternary factor */ + LinearInequality(Key i1, const RowVector& A1, Key i2, const RowVector& A2, Key i3, + const RowVector& A3, double b, Key dualKey) : + Base(i1, A1, i2, A2, i3, A3, (Vector(1) << b).finished(), + noiseModel::Constrained::All(1)), dualKey_(dualKey), active_(true) { + } + + /** Construct an n-ary factor + * 
@tparam TERMS A container whose value type is std::pair, specifying the + * collection of keys and matrices making up the factor. */ + template + LinearInequality(const TERMS& terms, double b, Key dualKey) : + Base(terms, (Vector(1) << b).finished(), noiseModel::Constrained::All(1)), dualKey_( + dualKey), active_(true) { + } + + /** Virtual destructor */ + virtual ~LinearInequality() { + } + + /** equals */ + virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const { + return Base::equals(lf, tol); + } + + /** print */ + virtual void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const { + if (active()) + Base::print(s + " Active", formatter); + else + Base::print(s + " Inactive", formatter); + } + + /** Clone this LinearInequality */ + virtual GaussianFactor::shared_ptr clone() const { + return boost::static_pointer_cast( + boost::make_shared(*this)); + } + + /// dual key + Key dualKey() const { return dualKey_; } + + /// return true if this constraint is active + bool active() const { return active_; } + + /// Make this inequality constraint active + void activate() { active_ = true; } + + /// Make this inequality constraint inactive + void inactivate() { active_ = false; } + + /** Special error_vector for constraints (A*x-b) */ + Vector error_vector(const VectorValues& c) const { + return unweighted_error(c); + } + + /** Special error for single-valued inequality constraints. */ + virtual double error(const VectorValues& c) const { + return error_vector(c)[0]; + } + + /** dot product of row s with the corresponding vector in p */ + double dotProductRow(const VectorValues& p) const { + double aTp = 0.0; + for (const_iterator xj = begin(); xj != end(); ++xj) { + Vector pj = p.at(*xj); + Vector aj = getA(xj).transpose(); + aTp += aj.dot(pj); + } + return aTp; + } + +}; +// LinearInequality + +}// gtsam + diff --git a/gtsam_unstable/linear/LinearInequalityFactorGraph.h b/gtsam_unstable/linear/LinearInequalityFactorGraph.h new file mode 100644 index 000000000..b72f2cc3c --- /dev/null +++ b/gtsam_unstable/linear/LinearInequalityFactorGraph.h @@ -0,0 +1,47 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * LinearInequalityFactorGraph.h + * @brief: Factor graph of all LinearInequality factors + * @date: Dec 8, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include +#include + +namespace gtsam { + +class LinearInequalityFactorGraph: public FactorGraph { +private: + typedef FactorGraph Base; + +public: + typedef boost::shared_ptr shared_ptr; + + /** print */ + void print(const std::string& str, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const { + Base::print(str, keyFormatter); + } + + /** equals */ + bool equals(const LinearInequalityFactorGraph& other, + double tol = 1e-9) const { + return Base::equals(other, tol); + } +}; + +} // namespace gtsam + diff --git a/gtsam_unstable/linear/QP.h b/gtsam_unstable/linear/QP.h new file mode 100644 index 000000000..111ab506f --- /dev/null +++ b/gtsam_unstable/linear/QP.h @@ -0,0 +1,58 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * QP.h + * @brief: Factor graphs of a Quadratic Programming problem + * @date: Dec 8, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +/** + * struct contains factor graphs of a Quadratic Programming problem + */ +struct QP { + GaussianFactorGraph cost; //!< Quadratic cost factors + LinearEqualityFactorGraph equalities; //!< linear equality constraints + LinearInequalityFactorGraph inequalities; //!< linear inequality constraints + + /** default constructor */ + QP() : + cost(), equalities(), inequalities() { + } + + /** constructor */ + QP(const GaussianFactorGraph& _cost, + const LinearEqualityFactorGraph& _linearEqualities, + const LinearInequalityFactorGraph& _linearInequalities) : + cost(_cost), equalities(_linearEqualities), inequalities( + _linearInequalities) { + } + + /** print */ + void print(const std::string& s = "") { + std::cout << s << std::endl; + cost.print("Quadratic cost factors: "); + equalities.print("Linear equality factors: "); + inequalities.print("Linear inequality factors: "); + } +}; + +} // namespace gtsam + diff --git a/gtsam_unstable/linear/QPSolver.cpp b/gtsam_unstable/linear/QPSolver.cpp new file mode 100644 index 000000000..f0eb7d7fb --- /dev/null +++ b/gtsam_unstable/linear/QPSolver.cpp @@ -0,0 +1,252 @@ +/* + * QPSolver.cpp + * @brief: + * @date: Apr 15, 2014 + * @author: thduynguyen + */ + +#include +#include +#include + +#include + +using namespace std; + +namespace gtsam { + +//****************************************************************************** +QPSolver::QPSolver(const QP& qp) : qp_(qp) { + baseGraph_ = qp_.cost; + baseGraph_.push_back(qp_.equalities.begin(), qp_.equalities.end()); + costVariableIndex_ = VariableIndex(qp_.cost); + equalityVariableIndex_ = VariableIndex(qp_.equalities); + inequalityVariableIndex_ = VariableIndex(qp_.inequalities); + constrainedKeys_ = qp_.equalities.keys(); + constrainedKeys_.merge(qp_.inequalities.keys()); +} + +//****************************************************************************** +VectorValues 
QPSolver::solveWithCurrentWorkingSet( + const LinearInequalityFactorGraph& workingSet) const { + GaussianFactorGraph workingGraph = baseGraph_; + BOOST_FOREACH(const LinearInequality::shared_ptr& factor, workingSet) { + if (factor->active()) + workingGraph.push_back(factor); + } + return workingGraph.optimize(); +} + +//****************************************************************************** +JacobianFactor::shared_ptr QPSolver::createDualFactor(Key key, + const LinearInequalityFactorGraph& workingSet, const VectorValues& delta) const { + + // Transpose the A matrix of constrained factors to have the jacobian of the dual key + std::vector > Aterms = collectDualJacobians + < LinearEquality > (key, qp_.equalities, equalityVariableIndex_); + std::vector > AtermsInequalities = collectDualJacobians + < LinearInequality > (key, workingSet, inequalityVariableIndex_); + Aterms.insert(Aterms.end(), AtermsInequalities.begin(), + AtermsInequalities.end()); + + // Collect the gradients of unconstrained cost factors to the b vector + if (Aterms.size() > 0) { + Vector b = zero(delta.at(key).size()); + if (costVariableIndex_.find(key) != costVariableIndex_.end()) { + BOOST_FOREACH(size_t factorIx, costVariableIndex_[key]) { + GaussianFactor::shared_ptr factor = qp_.cost.at(factorIx); + b += factor->gradient(key, delta); + } + } + return boost::make_shared(Aterms, b, noiseModel::Constrained::All(b.rows())); + } + else { + return boost::make_shared(); + } +} + +//****************************************************************************** +GaussianFactorGraph::shared_ptr QPSolver::buildDualGraph( + const LinearInequalityFactorGraph& workingSet, const VectorValues& delta) const { + GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph()); + BOOST_FOREACH(Key key, constrainedKeys_) { + // Each constrained key becomes a factor in the dual graph + JacobianFactor::shared_ptr dualFactor = createDualFactor(key, workingSet, delta); + if (!dualFactor->empty()) + dualGraph->push_back(dualFactor); + } + return dualGraph; +} + +//****************************************************************************** +int QPSolver::identifyLeavingConstraint( + const LinearInequalityFactorGraph& workingSet, + const VectorValues& lambdas) const { + int worstFactorIx = -1; + // preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either + // inactive or a good inequality constraint, so we don't care! + double maxLambda = 0.0; + for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) { + const LinearInequality::shared_ptr& factor = workingSet.at(factorIx); + if (factor->active()) { + double lambda = lambdas.at(factor->dualKey())[0]; + if (lambda > maxLambda) { + worstFactorIx = factorIx; + maxLambda = lambda; + } + } + } + return worstFactorIx; +} + +//****************************************************************************** +/* We have to make sure the new solution with alpha satisfies all INACTIVE inequality constraints + * If some inactive inequality constraints complain about the full step (alpha = 1), + * we have to adjust alpha to stay within the inequality constraints' feasible regions. + * + * For each inactive inequality j: + * - We already have: aj'*xk - bj <= 0, since xk satisfies all inequality constraints + * - We want: aj'*(xk + alpha*p) - bj <= 0 + * - If aj'*p <= 0, we have: aj'*(xk + alpha*p) <= aj'*xk <= bj, for all alpha>0 + * it's good! + * - We only care when aj'*p > 0. 
In this case, we need to choose alpha so that + * aj'*xk + alpha*aj'*p - bj <= 0 --> alpha <= (bj - aj'*xk) / (aj'*p) + * We want to step as far as possible, so we should choose alpha = (bj - aj'*xk) / (aj'*p) + * + * We want the minimum of all those alphas among all inactive inequality. + */ +boost::tuple QPSolver::computeStepSize( + const LinearInequalityFactorGraph& workingSet, const VectorValues& xk, + const VectorValues& p) const { + static bool debug = false; + + double minAlpha = 1.0; + int closestFactorIx = -1; + for(size_t factorIx = 0; factorIxgetb()[0]; + // only check inactive factors + if (!factor->active()) { + // Compute a'*p + double aTp = factor->dotProductRow(p); + + // Check if a'*p >0. Don't care if it's not. + if (aTp <= 0) + continue; + + // Compute a'*xk + double aTx = factor->dotProductRow(xk); + + // alpha = (b - a'*xk) / (a'*p) + double alpha = (b - aTx) / aTp; + if (debug) + cout << "alpha: " << alpha << endl; + + // We want the minimum of all those max alphas + if (alpha < minAlpha) { + closestFactorIx = factorIx; + minAlpha = alpha; + } + } + + } + + return boost::make_tuple(minAlpha, closestFactorIx); +} + +//****************************************************************************** +QPState QPSolver::iterate(const QPState& state) const { + static bool debug = false; + + // Solve with the current working set + VectorValues newValues = solveWithCurrentWorkingSet(state.workingSet); + if (debug) + newValues.print("New solution:"); + + // If we CAN'T move further + if (newValues.equals(state.values, 1e-5)) { + // Compute lambda from the dual graph + if (debug) + cout << "Building dual graph..." << endl; + GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet, newValues); + if (debug) + dualGraph->print("Dual graph: "); + VectorValues duals = dualGraph->optimize(); + if (debug) + duals.print("Duals :"); + + int leavingFactor = identifyLeavingConstraint(state.workingSet, duals); + if (debug) + cout << "leavingFactor: " << leavingFactor << endl; + + // If all inequality constraints are satisfied: We have the solution!! + if (leavingFactor < 0) { + return QPState(newValues, duals, state.workingSet, true); + } + else { + // Inactivate the leaving constraint + LinearInequalityFactorGraph newWorkingSet = state.workingSet; + newWorkingSet.at(leavingFactor)->inactivate(); + return QPState(newValues, duals, newWorkingSet, false); + } + } + else { + // If we CAN make some progress + // Adapt stepsize if some inactive constraints complain about this move + double alpha; + int factorIx; + VectorValues p = newValues - state.values; + boost::tie(alpha, factorIx) = // + computeStepSize(state.workingSet, state.values, p); + if (debug) + cout << "alpha, factorIx: " << alpha << " " << factorIx << " " + << endl; + + // also add to the working set the one that complains the most + LinearInequalityFactorGraph newWorkingSet = state.workingSet; + if (factorIx >= 0) + newWorkingSet.at(factorIx)->activate(); + + // step! 
+ newValues = state.values + alpha * p; + + return QPState(newValues, state.duals, newWorkingSet, false); + } +} + +//****************************************************************************** +LinearInequalityFactorGraph QPSolver::identifyActiveConstraints( + const LinearInequalityFactorGraph& inequalities, + const VectorValues& initialValues) const { + LinearInequalityFactorGraph workingSet; + BOOST_FOREACH(const LinearInequality::shared_ptr& factor, inequalities){ + LinearInequality::shared_ptr workingFactor(new LinearInequality(*factor)); + double error = workingFactor->error(initialValues); + if (fabs(error)>1e-7){ + workingFactor->inactivate(); + } else { + workingFactor->activate(); + } + workingSet.push_back(workingFactor); + } + return workingSet; +} + +//****************************************************************************** +pair QPSolver::optimize( + const VectorValues& initialValues) const { + + // Initialize workingSet from the feasible initialValues + LinearInequalityFactorGraph workingSet = + identifyActiveConstraints(qp_.inequalities, initialValues); + QPState state(initialValues, VectorValues(), workingSet, false); + + /// main loop of the solver + while (!state.converged) { + state = iterate(state); + } + + return make_pair(state.values, state.duals); +} + +} /* namespace gtsam */ diff --git a/gtsam_unstable/linear/QPSolver.h b/gtsam_unstable/linear/QPSolver.h new file mode 100644 index 000000000..f7a575f8c --- /dev/null +++ b/gtsam_unstable/linear/QPSolver.h @@ -0,0 +1,188 @@ +/* + * QPSolver.h + * @brief: A quadratic programming solver implements the active set method + * @date: Apr 15, 2014 + * @author: thduynguyen + */ + +#pragma once + +#include +#include + +#include +#include + +namespace gtsam { + +/// This struct holds the state of QPSolver at each iteration +struct QPState { + VectorValues values; + VectorValues duals; + LinearInequalityFactorGraph workingSet; + bool converged; + + /// default constructor + QPState() : + values(), duals(), workingSet(), converged(false) { + } + + /// constructor with initial values + QPState(const VectorValues& initialValues, const VectorValues& initialDuals, + const LinearInequalityFactorGraph& initialWorkingSet, bool _converged) : + values(initialValues), duals(initialDuals), workingSet(initialWorkingSet), converged( + _converged) { + } +}; + +/** + * This class implements the active set method to solve quadratic programming problems + * encoded in a GaussianFactorGraph with special mixed constrained noise models, in which + * a negative sigma denotes an inequality <=0 constraint, + * a zero sigma denotes an equality =0 constraint, + * and a positive sigma denotes a normal Gaussian noise model. + */ +class QPSolver { + + const QP& qp_; //!< factor graphs of the QP problem, can't be modified! + GaussianFactorGraph baseGraph_; //!< factor graphs of cost factors and linear equalities. The working set of inequalities will be added to this base graph in the process. 
+ VariableIndex costVariableIndex_, equalityVariableIndex_, + inequalityVariableIndex_; + FastSet constrainedKeys_; //!< all constrained keys, will become factors in the dual graph + +public: + /// Constructor + QPSolver(const QP& qp); + + /// Find solution with the current working set + VectorValues solveWithCurrentWorkingSet( + const LinearInequalityFactorGraph& workingSet) const; + + /// @name Build the dual graph + /// @{ + + /// Collect the Jacobian terms for a dual factor + template + std::vector > collectDualJacobians(Key key, + const FactorGraph& graph, + const VariableIndex& variableIndex) const { + std::vector > Aterms; + if (variableIndex.find(key) != variableIndex.end()) { + BOOST_FOREACH(size_t factorIx, variableIndex[key]){ + typename FACTOR::shared_ptr factor = graph.at(factorIx); + if (!factor->active()) continue; + Matrix Ai = factor->getA(factor->find(key)).transpose(); + Aterms.push_back(std::make_pair(factor->dualKey(), Ai)); + } + } + return Aterms; + } + + /// Create a dual factor + JacobianFactor::shared_ptr createDualFactor(Key key, + const LinearInequalityFactorGraph& workingSet, + const VectorValues& delta) const; + + /** + * Build the dual graph to solve for the Lagrange multipliers. + * + * The Lagrangian function is: + * L(X,lambdas) = f(X) - \sum_k lambda_k * c_k(X), + * where the unconstrained part is + * f(X) = 0.5*X'*G*X - X'*g + 0.5*f0 + * and the linear equality constraints are + * c1(X), c2(X), ..., cm(X) + * + * Take the derivative of L wrt X at the solution and set it to 0, we have + * \grad f(X) = \sum_k lambda_k * \grad c_k(X) (*) + * + * For each set of rows of (*) corresponding to a variable xi involving in some constraints + * we have: + * \grad f(xi) = \frac{\partial f}{\partial xi}' = \sum_j G_ij*xj - gi + * \grad c_k(xi) = \frac{\partial c_k}{\partial xi}' + * + * Note: If xi does not involve in any constraint, we have the trivial condition + * \grad f(Xi) = 0, which should be satisfied as a usual condition for unconstrained variables. + * + * So each variable xi involving in some constraints becomes a linear factor A*lambdas - b = 0 + * on the constraints' lambda multipliers, as follows: + * - The jacobian term A_k for each lambda_k is \grad c_k(xi) + * - The constant term b is \grad f(xi), which can be computed from all unconstrained + * Hessian factors connecting to xi: \grad f(xi) = \sum_j G_ij*xj - gi + */ + GaussianFactorGraph::shared_ptr buildDualGraph( + const LinearInequalityFactorGraph& workingSet, + const VectorValues& delta) const; + + /// @} + + /** + * The goal of this function is to find currently active inequality constraints + * that violate the condition to be active. The one that violates the condition + * the most will be removed from the active set. See Nocedal06book, pg 469-471 + * + * Find the BAD active inequality that pulls x strongest to the wrong direction + * of its constraint (i.e. it is pulling towards >0, while its feasible region is <=0) + * + * For active inequality constraints (those that are enforced as equality constraints + * in the current working set), we want lambda < 0. + * This is because: + * - From the Lagrangian L = f - lambda*c, we know that the constraint force + * is (lambda * \grad c) = \grad f. Intuitively, to keep the solution x stay + * on the constraint surface, the constraint force has to balance out with + * other unconstrained forces that are pulling x towards the unconstrained + * minimum point. 
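In equation form, the per-variable dual factor assembled by createDualFactor/buildDualGraph above is a restatement of (*) for one constrained variable \(x_i\):

\[
\Big[\,\nabla_{x_i} c_1 \;\; \nabla_{x_i} c_2 \;\cdots\; \nabla_{x_i} c_m\,\Big]
\begin{bmatrix}\lambda_1\\ \vdots \\ \lambda_m\end{bmatrix}
\;=\; \nabla_{x_i} f(x) \;=\; \sum_j G_{ij}\,x_j - g_i ,
\]

i.e. a JacobianFactor on the dual keys whose \(A\) blocks are the transposed constraint Jacobians collected by collectDualJacobians, and whose right-hand side \(b\) is the cost gradient at the current solution, computed with the gradient(key, x) routine added to the factors in this patch.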
The other unconstrained forces are pulling x toward (-\grad f), + * hence the constraint force has to be exactly \grad f, so that the total + * force is 0. + * - We also know that at the constraint surface c(x)=0, \grad c points towards + (>= 0), + * while we are solving for - (<=0) constraint. + * - We want the constraint force (lambda * \grad c) to pull x towards the - (<=0) direction + * i.e., the opposite direction of \grad c where the inequality constraint <=0 is satisfied. + * That means we want lambda < 0. + * - This is because when the constrained force pulls x towards the infeasible region (+), + * the unconstrained force is pulling x towards the opposite direction into + * the feasible region (again because the total force has to be 0 to make x stay still) + * So we can drop this constraint to have a lower error but feasible solution. + * + * In short, active inequality constraints with lambda > 0 are BAD, because they + * violate the condition to be active. + * + * And we want to remove the worst one with the largest lambda from the active set. + * + */ + int identifyLeavingConstraint(const LinearInequalityFactorGraph& workingSet, + const VectorValues& lambdas) const; + + /** + * Compute step size alpha for the new solution x' = xk + alpha*p, where alpha \in [0,1] + * + * @return a tuple of (alpha, factorIndex, sigmaIndex) where (factorIndex, sigmaIndex) + * is the constraint that has minimum alpha, or (-1,-1) if alpha = 1. + * This constraint will be added to the working set and become active + * in the next iteration + */ + boost::tuple computeStepSize( + const LinearInequalityFactorGraph& workingSet, const VectorValues& xk, + const VectorValues& p) const; + + /** Iterate 1 step, return a new state with a new workingSet and values */ + QPState iterate(const QPState& state) const; + + /** + * Identify active constraints based on initial values. + */ + LinearInequalityFactorGraph identifyActiveConstraints( + const LinearInequalityFactorGraph& inequalities, + const VectorValues& initialValues) const; + + /** Optimize with a provided initial values + * For this version, it is the responsibility of the caller to provide + * a feasible initial value. + * @return a pair of solutions + */ + std::pair optimize( + const VectorValues& initialValues) const; + +}; + +} /* namespace gtsam */ diff --git a/gtsam_unstable/linear/tests/CMakeLists.txt b/gtsam_unstable/linear/tests/CMakeLists.txt new file mode 100644 index 000000000..43df23daa --- /dev/null +++ b/gtsam_unstable/linear/tests/CMakeLists.txt @@ -0,0 +1 @@ +gtsamAddTestsGlob(linear_unstable "test*.cpp" "" "gtsam_unstable") diff --git a/gtsam_unstable/linear/tests/testLinearEquality.cpp b/gtsam_unstable/linear/tests/testLinearEquality.cpp new file mode 100644 index 000000000..bf41743a2 --- /dev/null +++ b/gtsam_unstable/linear/tests/testLinearEquality.cpp @@ -0,0 +1,237 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testLinearEquality.cpp + * @brief Unit tests for LinearEquality + * @author thduynguyen + **/ + +#include +#include +#include +#include +#include + +#include +#include + +using namespace std; +using namespace gtsam; +using namespace boost::assign; + +GTSAM_CONCEPT_TESTABLE_INST(LinearEquality) + +namespace { + namespace simple { + // Terms we'll use + const vector > terms = list_of > + (make_pair(5, Matrix3::Identity())) + (make_pair(10, 2*Matrix3::Identity())) + (make_pair(15, 3*Matrix3::Identity())); + + // RHS and sigmas + const Vector b = (Vector(3) << 1., 2., 3.).finished(); + const SharedDiagonal noise = noiseModel::Constrained::All(3); + } +} + +/* ************************************************************************* */ +TEST(LinearEquality, constructors_and_accessors) +{ + using namespace simple; + + // Test for using different numbers of terms + { + // One term constructor + LinearEquality expected( + boost::make_iterator_range(terms.begin(), terms.begin() + 1), b, 0); + LinearEquality actual(terms[0].first, terms[0].second, b, 0); + EXPECT(assert_equal(expected, actual)); + LONGS_EQUAL((long)terms[0].first, (long)actual.keys().back()); + EXPECT(assert_equal(terms[0].second, actual.getA(actual.end() - 1))); + EXPECT(assert_equal(b, expected.getb())); + EXPECT(assert_equal(b, actual.getb())); + EXPECT(assert_equal(*noise, *actual.get_model())); + } + { + // Two term constructor + LinearEquality expected( + boost::make_iterator_range(terms.begin(), terms.begin() + 2), b, 0); + LinearEquality actual(terms[0].first, terms[0].second, + terms[1].first, terms[1].second, b, 0); + EXPECT(assert_equal(expected, actual)); + LONGS_EQUAL((long)terms[1].first, (long)actual.keys().back()); + EXPECT(assert_equal(terms[1].second, actual.getA(actual.end() - 1))); + EXPECT(assert_equal(b, expected.getb())); + EXPECT(assert_equal(b, actual.getb())); + EXPECT(assert_equal(*noise, *actual.get_model())); + } + { + // Three term constructor + LinearEquality expected( + boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, 0); + LinearEquality actual(terms[0].first, terms[0].second, + terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, 0); + EXPECT(assert_equal(expected, actual)); + LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back()); + EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1))); + EXPECT(assert_equal(b, expected.getb())); + EXPECT(assert_equal(b, actual.getb())); + EXPECT(assert_equal(*noise, *actual.get_model())); + } +} + +/* ************************************************************************* */ +TEST(LinearEquality, Hessian_conversion) { + HessianFactor hessian(0, (Matrix(4,4) << + 1.57, 2.695, -1.1, -2.35, + 2.695, 11.3125, -0.65, -10.225, + -1.1, -0.65, 1, 0.5, + -2.35, -10.225, 0.5, 9.25).finished(), + (Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(), + 73.1725); + + try { + LinearEquality actual(hessian); + EXPECT(false); + } + catch (const std::runtime_error& exception) { + EXPECT(true); + } +} + +/* ************************************************************************* */ +TEST(LinearEquality, error) +{ + LinearEquality factor(simple::terms, simple::b, 0); + + VectorValues values; + values.insert(5, Vector::Constant(3, 1.0)); + values.insert(10, Vector::Constant(3, 0.5)); + values.insert(15, Vector::Constant(3, 1.0/3.0)); + + 
Vector expected_unwhitened(3); expected_unwhitened << 2.0, 1.0, 0.0; + Vector actual_unwhitened = factor.unweighted_error(values); + EXPECT(assert_equal(expected_unwhitened, actual_unwhitened)); + + // whitened is meaningless in constraints + Vector expected_whitened(3); expected_whitened = expected_unwhitened; + Vector actual_whitened = factor.error_vector(values); + EXPECT(assert_equal(expected_whitened, actual_whitened)); + + double expected_error = 0.0; + double actual_error = factor.error(values); + DOUBLES_EQUAL(expected_error, actual_error, 1e-10); +} + +/* ************************************************************************* */ +TEST(LinearEquality, matrices_NULL) +{ + // Make sure everything works with NULL noise model + LinearEquality factor(simple::terms, simple::b, 0); + + Matrix AExpected(3, 9); + AExpected << simple::terms[0].second, simple::terms[1].second, simple::terms[2].second; + Vector rhsExpected = simple::b; + Matrix augmentedJacobianExpected(3, 10); + augmentedJacobianExpected << AExpected, rhsExpected; + + // Whitened Jacobian + EXPECT(assert_equal(AExpected, factor.jacobian().first)); + EXPECT(assert_equal(rhsExpected, factor.jacobian().second)); + EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobian())); + + // Unwhitened Jacobian + EXPECT(assert_equal(AExpected, factor.jacobianUnweighted().first)); + EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second)); + EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted())); +} + +/* ************************************************************************* */ +TEST(LinearEquality, matrices) +{ + // And now witgh a non-unit noise model + LinearEquality factor(simple::terms, simple::b, 0); + + Matrix jacobianExpected(3, 9); + jacobianExpected << simple::terms[0].second, simple::terms[1].second, simple::terms[2].second; + Vector rhsExpected = simple::b; + Matrix augmentedJacobianExpected(3, 10); + augmentedJacobianExpected << jacobianExpected, rhsExpected; + + Matrix augmentedHessianExpected = + augmentedJacobianExpected.transpose() * simple::noise->R().transpose() + * simple::noise->R() * augmentedJacobianExpected; + + // Whitened Jacobian + EXPECT(assert_equal(jacobianExpected, factor.jacobian().first)); + EXPECT(assert_equal(rhsExpected, factor.jacobian().second)); + EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobian())); + + // Unwhitened Jacobian + EXPECT(assert_equal(jacobianExpected, factor.jacobianUnweighted().first)); + EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second)); + EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted())); +} + +/* ************************************************************************* */ +TEST(LinearEquality, operators ) +{ + Matrix I = eye(2); + Vector b = (Vector(2) << 0.2,-0.1).finished(); + LinearEquality lf(1, -I, 2, I, b, 0); + + VectorValues c; + c.insert(1, (Vector(2) << 10.,20.).finished()); + c.insert(2, (Vector(2) << 30.,60.).finished()); + + // test A*x + Vector expectedE = (Vector(2) << 20.,40.).finished(); + Vector actualE = lf * c; + EXPECT(assert_equal(expectedE, actualE)); + + // test A^e + VectorValues expectedX; + expectedX.insert(1, (Vector(2) << -20.,-40.).finished()); + expectedX.insert(2, (Vector(2) << 20., 40.).finished()); + VectorValues actualX = VectorValues::Zero(expectedX); + lf.transposeMultiplyAdd(1.0, actualE, actualX); + EXPECT(assert_equal(expectedX, actualX)); + + // test gradient at zero + Matrix A; Vector b2; 
boost::tie(A,b2) = lf.jacobian(); + VectorValues expectedG; + expectedG.insert(1, (Vector(2) << 0.2, -0.1).finished()); + expectedG.insert(2, (Vector(2) << -0.2, 0.1).finished()); + VectorValues actualG = lf.gradientAtZero(); + EXPECT(assert_equal(expectedG, actualG)); +} + +/* ************************************************************************* */ +TEST(LinearEquality, default_error ) +{ + LinearEquality f; + double actual = f.error(VectorValues()); + DOUBLES_EQUAL(0.0, actual, 1e-15); +} + +//* ************************************************************************* */ +TEST(LinearEquality, empty ) +{ + // create an empty factor + LinearEquality f; + EXPECT(f.empty()); +} + +/* ************************************************************************* */ +int main() { TestResult tr; return TestRegistry::runAllTests(tr);} +/* ************************************************************************* */ diff --git a/gtsam_unstable/linear/tests/testQPSolver.cpp b/gtsam_unstable/linear/tests/testQPSolver.cpp new file mode 100644 index 000000000..8fca61ca4 --- /dev/null +++ b/gtsam_unstable/linear/tests/testQPSolver.cpp @@ -0,0 +1,311 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testQPSolver.cpp + * @brief Test simple QP solver for a linear inequality constraint + * @date Apr 10, 2014 + * @author Duy-Nguyen Ta + */ + +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; +using namespace gtsam::symbol_shorthand; + +const Matrix One = ones(1,1); + +/* ************************************************************************* */ +// Create test graph according to Forst10book_pg171Ex5 +QP createTestCase() { + QP qp; + + // Objective functions x1^2 - x1*x2 + x2^2 - 3*x1 + 5 + // Note the Hessian encodes: + // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f + // Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10 + qp.cost.push_back( + HessianFactor(X(1), X(2), 2.0 * ones(1, 1), -ones(1, 1), 3.0 * ones(1), + 2.0 * ones(1, 1), zero(1), 10.0)); + + // Inequality constraints + qp.inequalities.push_back(LinearInequality(X(1), ones(1,1), X(2), ones(1,1), 2, 0)); // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 + qp.inequalities.push_back(LinearInequality(X(1), -ones(1,1), 0, 1)); // -x1 <= 0 + qp.inequalities.push_back(LinearInequality(X(2), -ones(1,1), 0, 2)); // -x2 <= 0 + qp.inequalities.push_back(LinearInequality(X(1), ones(1,1), 1.5, 3)); // x1 <= 3/2 + + return qp; +} + +TEST(QPSolver, TestCase) { + VectorValues values; + double x1 = 5, x2 = 7; + values.insert(X(1), x1 * ones(1, 1)); + values.insert(X(2), x2 * ones(1, 1)); + QP qp = createTestCase(); + DOUBLES_EQUAL(29, x1 * x1 - x1 * x2 + x2 * x2 - 3 * x1 + 5, 1e-9); + DOUBLES_EQUAL(29, qp.cost[0]->error(values), 1e-9); +} + +TEST(QPSolver, constraintsAux) { + QP qp = createTestCase(); + + QPSolver solver(qp); + + VectorValues lambdas; + lambdas.insert(0, (Vector(1) << -0.5).finished()); + lambdas.insert(1, (Vector(1) << 0.0).finished()); + lambdas.insert(2, (Vector(1) << 0.3).finished()); + lambdas.insert(3, (Vector(1) << 0.1).finished()); + int factorIx = 
+  LONGS_EQUAL(2, factorIx);
+
+  VectorValues lambdas2;
+  lambdas2.insert(0, (Vector(1) << -0.5).finished());
+  lambdas2.insert(1, (Vector(1) << 0.0).finished());
+  lambdas2.insert(2, (Vector(1) << -0.3).finished());
+  lambdas2.insert(3, (Vector(1) << -0.1).finished());
+  int factorIx2 = solver.identifyLeavingConstraint(qp.inequalities, lambdas2);
+  LONGS_EQUAL(-1, factorIx2);
+}
+
+/* ************************************************************************* */
+// Create a simple test graph with one equality constraint
+QP createEqualityConstrainedTest() {
+  QP qp;
+
+  // Objective function: x1^2 + x2^2
+  // Note the Hessian encodes:
+  //   0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
+  // Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0
+  qp.cost.push_back(
+      HessianFactor(X(1), X(2), 2.0 * ones(1, 1), zeros(1, 1), zero(1),
+          2.0 * ones(1, 1), zero(1), 0.0));
+
+  // Equality constraints
+  // x1 + x2 = 1 --> x1 + x2 - 1 = 0, hence we negate the b vector
+  Matrix A1 = (Matrix(1, 1) << 1).finished();
+  Matrix A2 = (Matrix(1, 1) << 1).finished();
+  Vector b = -(Vector(1) << 1).finished();
+  qp.equalities.push_back(LinearEquality(X(1), A1, X(2), A2, b, 0));
+
+  return qp;
+}
+
+TEST(QPSolver, dual) {
+  QP qp = createEqualityConstrainedTest();
+
+  // Initial values
+  VectorValues initialValues;
+  initialValues.insert(X(1), ones(1));
+  initialValues.insert(X(2), ones(1));
+
+  QPSolver solver(qp);
+
+  GaussianFactorGraph::shared_ptr dualGraph = solver.buildDualGraph(
+      qp.inequalities, initialValues);
+  VectorValues dual = dualGraph->optimize();
+  VectorValues expectedDual;
+  expectedDual.insert(0, (Vector(1) << 2.0).finished());
+  CHECK(assert_equal(expectedDual, dual, 1e-10));
+}
+
+/* ************************************************************************* */
+TEST(QPSolver, identifyActiveConstraints) {
+  QP qp = createTestCase();
+  QPSolver solver(qp);
+
+  VectorValues currentSolution;
+  currentSolution.insert(X(1), zero(1));
+  currentSolution.insert(X(2), zero(1));
+
+  LinearInequalityFactorGraph workingSet =
+      solver.identifyActiveConstraints(qp.inequalities, currentSolution);
+
+  CHECK(!workingSet.at(0)->active()); // inactive
+  CHECK(workingSet.at(1)->active());  // active
+  CHECK(workingSet.at(2)->active());  // active
+  CHECK(!workingSet.at(3)->active()); // inactive
+
+  VectorValues solution = solver.solveWithCurrentWorkingSet(workingSet);
+  VectorValues expectedSolution;
+  expectedSolution.insert(X(1), (Vector(1) << 0.0).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 0.0).finished());
+  CHECK(assert_equal(expectedSolution, solution, 1e-100));
+}
+
+/* ************************************************************************* */
+TEST(QPSolver, iterate) {
+  QP qp = createTestCase();
+  QPSolver solver(qp);
+
+  VectorValues currentSolution;
+  currentSolution.insert(X(1), zero(1));
+  currentSolution.insert(X(2), zero(1));
+
+  std::vector<VectorValues> expectedSolutions(4), expectedDuals(4);
+  expectedSolutions[0].insert(X(1), (Vector(1) << 0.0).finished());
+  expectedSolutions[0].insert(X(2), (Vector(1) << 0.0).finished());
+  expectedDuals[0].insert(1, (Vector(1) << 3).finished());
+  expectedDuals[0].insert(2, (Vector(1) << 0).finished());
+
+  expectedSolutions[1].insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolutions[1].insert(X(2), (Vector(1) << 0.0).finished());
+  expectedDuals[1].insert(3, (Vector(1) << 1.5).finished());
+
+  expectedSolutions[2].insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolutions[2].insert(X(2), (Vector(1) << 0.75).finished());
+
+  expectedSolutions[3].insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolutions[3].insert(X(2), (Vector(1) << 0.5).finished());
+
+  LinearInequalityFactorGraph workingSet =
+      solver.identifyActiveConstraints(qp.inequalities, currentSolution);
+
+  QPState state(currentSolution, VectorValues(), workingSet, false);
+
+  int it = 0;
+  while (!state.converged) {
+    state = solver.iterate(state);
+    // These checks will fail because the expected solutions obtained from
+    // Forst10book do not follow exactly what we implemented from Nocedal06book.
+    // Specifically, we do not re-identify active constraints and
+    // do not recompute dual variables after every step!!!
+//    CHECK(assert_equal(expectedSolutions[it], state.values, 1e-10));
+//    CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10));
+    it++;
+  }
+
+  CHECK(assert_equal(expectedSolutions[3], state.values, 1e-10));
+}
+
+/* ************************************************************************* */
+
+TEST(QPSolver, optimizeForst10book_pg171Ex5) {
+  QP qp = createTestCase();
+  QPSolver solver(qp);
+  VectorValues initialValues;
+  initialValues.insert(X(1), zero(1));
+  initialValues.insert(X(2), zero(1));
+  VectorValues solution;
+  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
+  VectorValues expectedSolution;
+  expectedSolution.insert(X(1), (Vector(1) << 1.5).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 0.5).finished());
+  CHECK(assert_equal(expectedSolution, solution, 1e-100));
+}
+
+/* ************************************************************************* */
+// Create Matlab's test graph as in http://www.mathworks.com/help/optim/ug/quadprog.html
+QP createTestMatlabQPEx() {
+  QP qp;
+
+  // Objective function: 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 - 6*x2
+  // Note the Hessian encodes:
+  //   0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f
+  // Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0
+  // (the factor below uses f = 1000, a constant offset that does not affect the minimizer)
+  qp.cost.push_back(
+      HessianFactor(X(1), X(2), 1.0 * ones(1, 1), -ones(1, 1), 2.0 * ones(1),
+          2.0 * ones(1, 1), 6 * ones(1), 1000.0));
+
+  // Inequality constraints
+  qp.inequalities.push_back(LinearInequality(X(1), One, X(2), One, 2, 0));    // x1 + x2 <= 2
+  qp.inequalities.push_back(LinearInequality(X(1), -One, X(2), 2*One, 2, 1)); // -x1 + 2*x2 <= 2
+  qp.inequalities.push_back(LinearInequality(X(1), 2*One, X(2), One, 3, 2));  // 2*x1 + x2 <= 3
+  qp.inequalities.push_back(LinearInequality(X(1), -One, 0, 3));              // -x1 <= 0
+  qp.inequalities.push_back(LinearInequality(X(2), -One, 0, 4));              // -x2 <= 0
+
+  return qp;
+}
+
+TEST(QPSolver, optimizeMatlabEx) {
+  QP qp = createTestMatlabQPEx();
+  QPSolver solver(qp);
+  VectorValues initialValues;
+  initialValues.insert(X(1), zero(1));
+  initialValues.insert(X(2), zero(1));
+  VectorValues solution;
+  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
+  VectorValues expectedSolution;
+  expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished());
+  CHECK(assert_equal(expectedSolution, solution, 1e-7));
+}
+
+/* ************************************************************************* */
+// Create test graph as in Nocedal06book, Ex 16.4, pg. 475
+QP createTestNocedal06bookEx16_4() {
+  QP qp;
+
+  qp.cost.push_back(JacobianFactor(X(1), ones(1, 1), ones(1)));
+  qp.cost.push_back(JacobianFactor(X(2), ones(1, 1), 2.5 * ones(1)));
+
+  // Inequality constraints
+  qp.inequalities.push_back(LinearInequality(X(1), -One, X(2), 2 * One, 2, 0));
+  qp.inequalities.push_back(LinearInequality(X(1), One, X(2), 2 * One, 6, 1));
+  qp.inequalities.push_back(LinearInequality(X(1), One, X(2), -2 * One, 2, 2));
+  qp.inequalities.push_back(LinearInequality(X(1), -One, 0.0, 3));
+  qp.inequalities.push_back(LinearInequality(X(2), -One, 0.0, 4));
+
+  return qp;
+}
+
+TEST(QPSolver, optimizeNocedal06bookEx16_4) {
+  QP qp = createTestNocedal06bookEx16_4();
+  QPSolver solver(qp);
+  VectorValues initialValues;
+  initialValues.insert(X(1), (Vector(1) << 2.0).finished());
+  initialValues.insert(X(2), zero(1));
+
+  VectorValues solution;
+  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
+  VectorValues expectedSolution;
+  expectedSolution.insert(X(1), (Vector(1) << 1.4).finished());
+  expectedSolution.insert(X(2), (Vector(1) << 1.7).finished());
+  CHECK(assert_equal(expectedSolution, solution, 1e-7));
+}
+
+/* ************************************************************************* */
+
+TEST(QPSolver, failedSubproblem) {
+  QP qp;
+  qp.cost.push_back(JacobianFactor(X(1), eye(2), zero(2)));
+  qp.cost.push_back(HessianFactor(X(1), zeros(2, 2), zero(2), 100.0));
+  qp.inequalities.push_back(
+      LinearInequality(X(1), (Matrix(1,2) << -1.0, 0.0).finished(), -1.0, 0));
+
+  VectorValues expected;
+  expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished());
+
+  VectorValues initialValues;
+  initialValues.insert(X(1), (Vector(2) << 10.0, 100.0).finished());
+
+  QPSolver solver(qp);
+  VectorValues solution;
+  boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues);
+//  graph.print("Graph: ");
+//  solution.print("Solution: ");
+  CHECK(assert_equal(expected, solution, 1e-7));
+}
+
+/* ************************************************************************* */
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
+/* ************************************************************************* */
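
Note on the Hessian encoding used throughout these tests: each cost term is a HessianFactor constructed as HessianFactor(key1, key2, G11, G12, g1, G22, g2, f), whose error is 0.5*x'*G*x - x'*g + 0.5*f, as the in-code comments above state. The following standalone sketch is not part of the patch; it only reuses the constructor and error() call already exercised in TEST(QPSolver, TestCase), and it assumes the same headers and using-declarations as testQPSolver.cpp. It spells out how the Forst10book objective x1^2 - x1*x2 + x2^2 - 3*x1 + 5 maps onto (G11, G12, g1, G22, g2, f), and why the expected error at (x1, x2) = (5, 7) is 29.

  // Objective: x1^2 - x1*x2 + x2^2 - 3*x1 + 5
  //          = 0.5*x'*G*x - x'*g + 0.5*f  with  G = [2 -1; -1 2], g = [3; 0], f = 10
  HessianFactor cost(X(1), X(2),
      2.0 * ones(1, 1), // G11: 0.5*G11*x1^2 = x1^2
      -ones(1, 1),      // G12: x1*G12*x2   = -x1*x2
      3.0 * ones(1),    // g1:  -x1*g1      = -3*x1
      2.0 * ones(1, 1), // G22: 0.5*G22*x2^2 = x2^2
      zero(1),          // g2:  no linear term in x2
      10.0);            // f:   0.5*f reproduces the constant +5

  VectorValues x;
  x.insert(X(1), 5.0 * ones(1));
  x.insert(X(2), 7.0 * ones(1));
  // 0.5*(2*25 - 2*35 + 2*49) - 3*5 + 0.5*10 = 39 - 15 + 5 = 29,
  // the same value checked in TEST(QPSolver, TestCase) above.
  DOUBLES_EQUAL(29, cost.error(x), 1e-9);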
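
The LinearInequality terms read analogously: each constructor lists (key, coefficient) pairs, then the scalar right-hand side of a'x <= b (the inline comments such as "x1 + x2 <= 2 --> b=2" spell this out), and finally an integer constraint index, which, as the tests suggest, is also the key under which that constraint's dual variable (Lagrange multiplier) is stored; see the lambdas keyed 0..3 in TEST(QPSolver, constraintsAux) and expectedDual keyed 0 in TEST(QPSolver, dual). As a purely hypothetical illustration, an extra bound x2 <= 4 appended to createTestCase() would be written as:

  // Hypothetical fifth constraint for createTestCase(): x2 <= 4,
  // using dual key 4 (the next unused constraint index).
  qp.inequalities.push_back(LinearInequality(X(2), ones(1, 1), 4.0, 4));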