diff --git a/.cproject b/.cproject index 19005c63f..97e36d81f 100644 --- a/.cproject +++ b/.cproject @@ -1,19 +1,17 @@ - - - + - - + + @@ -62,13 +60,13 @@ - - + + @@ -118,13 +116,13 @@ - - + + @@ -1932,6 +1930,22 @@ true true + + make + -j5 + testImuFactor.run + true + true + true + + + make + -j5 + testCombinedImuFactor.run + true + true + true + make -j2 @@ -2855,6 +2869,14 @@ true true + + make + -j5 + check.tests + true + true + true + make -j5 diff --git a/examples/Data/dubrovnik-1-1-pre.txt b/examples/Data/dubrovnik-1-1-pre.txt new file mode 100644 index 000000000..612dc16d6 --- /dev/null +++ b/examples/Data/dubrovnik-1-1-pre.txt @@ -0,0 +1,17 @@ +1 1 1 + +0 0 -3.859900e+02 3.871200e+02 + +-1.6943983532198115e-02 +1.1171804676513932e-02 +2.4643508831711991e-03 +7.3030995682610689e-01 +-2.6490818471043420e-01 +-1.7127892627337182e+00 +1.4300319432711681e+03 +-7.5572758535864072e-08 +3.2377569465570913e-14 + +-1.2055995050700867e+01 +1.2838775976205760e+01 +-4.1099369264082803e+01 \ No newline at end of file diff --git a/gtsam/base/SymmetricBlockMatrix.cpp b/gtsam/base/SymmetricBlockMatrix.cpp index 96adf04b1..d95fe84b7 100644 --- a/gtsam/base/SymmetricBlockMatrix.cpp +++ b/gtsam/base/SymmetricBlockMatrix.cpp @@ -1,20 +1,20 @@ /* ---------------------------------------------------------------------------- -* GTSAM Copyright 2010, Georgia Tech Research Corporation, -* Atlanta, Georgia 30332-0415 -* All Rights Reserved -* Authors: Frank Dellaert, et al. (see THANKS for the full author list) + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) -* See LICENSE for the license information + * See LICENSE for the license information -* -------------------------------------------------------------------------- */ + * -------------------------------------------------------------------------- */ /** -* @file SymmetricBlockMatrix.cpp -* @brief Access to matrices via blocks of pre-defined sizes. Used in GaussianFactor and GaussianConditional. -* @author Richard Roberts -* @date Sep 18, 2010 -*/ + * @file SymmetricBlockMatrix.cpp + * @brief Access to matrices via blocks of pre-defined sizes. Used in GaussianFactor and GaussianConditional. 
+ * @author Richard Roberts + * @date Sep 18, 2010 + */ #include #include @@ -23,42 +23,43 @@ namespace gtsam { - /* ************************************************************************* */ - SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(const SymmetricBlockMatrix& other) - { - SymmetricBlockMatrix result; - result.variableColOffsets_.resize(other.nBlocks() + 1); - for(size_t i = 0; i < result.variableColOffsets_.size(); ++i) - result.variableColOffsets_[i] = - other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_]; - result.matrix_.resize(other.cols(), other.cols()); - result.assertInvariants(); - return result; - } +/* ************************************************************************* */ +SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf( + const SymmetricBlockMatrix& other) { + SymmetricBlockMatrix result; + result.variableColOffsets_.resize(other.nBlocks() + 1); + for (size_t i = 0; i < result.variableColOffsets_.size(); ++i) + result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_ + + i] - other.variableColOffsets_[other.blockStart_]; + result.matrix_.resize(other.cols(), other.cols()); + result.assertInvariants(); + return result; +} - /* ************************************************************************* */ - SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(const VerticalBlockMatrix& other) - { - SymmetricBlockMatrix result; - result.variableColOffsets_.resize(other.nBlocks() + 1); - for(size_t i = 0; i < result.variableColOffsets_.size(); ++i) - result.variableColOffsets_[i] = - other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_]; - result.matrix_.resize(other.cols(), other.cols()); - result.assertInvariants(); - return result; - } +/* ************************************************************************* */ +SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf( + const VerticalBlockMatrix& other) { + SymmetricBlockMatrix result; + result.variableColOffsets_.resize(other.nBlocks() + 1); + for (size_t i = 0; i < result.variableColOffsets_.size(); ++i) + result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_ + + i] - other.variableColOffsets_[other.blockStart_]; + result.matrix_.resize(other.cols(), other.cols()); + result.assertInvariants(); + return result; +} - /* ************************************************************************* */ - VerticalBlockMatrix SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals) - { - // Do dense elimination - if(!blockStart() == 0) - throw std::invalid_argument("Can only do Cholesky when the SymmetricBlockMatrix is not a restricted view, i.e. when blockStart == 0."); - if(!gtsam::choleskyPartial(matrix_, offset(nFrontals))) - throw CholeskyFailed(); +/* ************************************************************************* */ +VerticalBlockMatrix SymmetricBlockMatrix::choleskyPartial( + DenseIndex nFrontals) { + // Do dense elimination + if (!blockStart() == 0) + throw std::invalid_argument( + "Can only do Cholesky when the SymmetricBlockMatrix is not a restricted view, i.e. when blockStart == 0."); + if (!gtsam::choleskyPartial(matrix_, offset(nFrontals))) + throw CholeskyFailed(); - // Split conditional + // Split conditional // Create one big conditionals with many frontal variables. 
gttic(Construct_conditional); @@ -68,11 +69,14 @@ namespace gtsam { Ab.full().triangularView().setZero(); gttoc(Construct_conditional); - gttic(Remaining_factor); - // Take lower-right block of Ab_ to get the remaining factor - blockStart() = nFrontals; - gttoc(Remaining_factor); + gttic(Remaining_factor); + // Take lower-right block of Ab_ to get the remaining factor + blockStart() = nFrontals; + gttoc(Remaining_factor); - return Ab; - } + return Ab; } +/* ************************************************************************* */ + +} //\ namespace gtsam + diff --git a/gtsam/base/VerticalBlockMatrix.cpp b/gtsam/base/VerticalBlockMatrix.cpp index 0361264fe..9f54b9c97 100644 --- a/gtsam/base/VerticalBlockMatrix.cpp +++ b/gtsam/base/VerticalBlockMatrix.cpp @@ -21,32 +21,32 @@ namespace gtsam { - /* ************************************************************************* */ - VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(const VerticalBlockMatrix& other) - { - VerticalBlockMatrix result; - result.variableColOffsets_.resize(other.nBlocks() + 1); - for(size_t i = 0; i < result.variableColOffsets_.size(); ++i) - result.variableColOffsets_[i] = - other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_]; - result.matrix_.resize(other.rows(), result.variableColOffsets_.back()); - result.rowEnd_ = other.rows(); - result.assertInvariants(); - return result; - } +/* ************************************************************************* */ +VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf( + const VerticalBlockMatrix& other) { + VerticalBlockMatrix result; + result.variableColOffsets_.resize(other.nBlocks() + 1); + for (size_t i = 0; i < result.variableColOffsets_.size(); ++i) + result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_ + + i] - other.variableColOffsets_[other.blockStart_]; + result.matrix_.resize(other.rows(), result.variableColOffsets_.back()); + result.rowEnd_ = other.rows(); + result.assertInvariants(); + return result; +} - /* ************************************************************************* */ - VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(const SymmetricBlockMatrix& other, DenseIndex height) - { - VerticalBlockMatrix result; - result.variableColOffsets_.resize(other.nBlocks() + 1); - for(size_t i = 0; i < result.variableColOffsets_.size(); ++i) - result.variableColOffsets_[i] = - other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_]; - result.matrix_.resize(height, result.variableColOffsets_.back()); - result.rowEnd_ = height; - result.assertInvariants(); - return result; - } +/* ************************************************************************* */ +VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf( + const SymmetricBlockMatrix& other, DenseIndex height) { + VerticalBlockMatrix result; + result.variableColOffsets_.resize(other.nBlocks() + 1); + for (size_t i = 0; i < result.variableColOffsets_.size(); ++i) + result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_ + + i] - other.variableColOffsets_[other.blockStart_]; + result.matrix_.resize(height, result.variableColOffsets_.back()); + result.rowEnd_ = height; + result.assertInvariants(); + return result; +} } diff --git a/gtsam/base/tests/testVerticalBlockMatrix.cpp b/gtsam/base/tests/testVerticalBlockMatrix.cpp new file mode 100644 index 000000000..fad23fa7d --- /dev/null +++ b/gtsam/base/tests/testVerticalBlockMatrix.cpp @@ -0,0 +1,47 @@ +/* 
---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testVerticalBlockMatrix.cpp + * @brief Unit tests for VerticalBlockMatrix class + * @author Frank Dellaert + * @date February 15, 2014 + **/ + +#include +#include +#include + +using namespace std; +using namespace gtsam; +using boost::assign::list_of; + +//***************************************************************************** +TEST(VerticalBlockMatrix, constructor) { + VerticalBlockMatrix actual(list_of(3)(2)(1), + (Matrix(6, 6) << 1, 2, 3, 4, 5, 6, // + 2, 8, 9, 10, 11, 12, // + 3, 9, 15, 16, 17, 18, // + 4, 10, 16, 22, 23, 24, // + 5, 11, 17, 23, 29, 30, // + 6, 12, 18, 24, 30, 36)); + EXPECT_LONGS_EQUAL(6,actual.rows()); + EXPECT_LONGS_EQUAL(6,actual.cols()); + EXPECT_LONGS_EQUAL(3,actual.nBlocks()); +} + +//***************************************************************************** +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +//***************************************************************************** + diff --git a/gtsam/geometry/Cal3Bundler.cpp b/gtsam/geometry/Cal3Bundler.cpp index 03071a4f2..587d0ea63 100644 --- a/gtsam/geometry/Cal3Bundler.cpp +++ b/gtsam/geometry/Cal3Bundler.cpp @@ -78,17 +78,16 @@ Point2 Cal3Bundler::uncalibrate(const Point2& p, // // Derivatives make use of intermediate variables above if (Dcal) { double rx = r * x, ry = r * y; - Eigen::Matrix D; - D << u, f_ * rx, f_ * r * rx, v, f_ * ry, f_ * r * ry; - *Dcal = D; + Dcal->resize(2, 3); + *Dcal << u, f_ * rx, f_ * r * rx, v, f_ * ry, f_ * r * ry; } if (Dp) { const double a = 2. * (k1_ + 2. 
* k2_ * r); const double axx = a * x * x, axy = a * x * y, ayy = a * y * y; - Eigen::Matrix D; - D << g + axx, axy, axy, g + ayy; - *Dp = f_ * D; + Dp->resize(2,2); + *Dp << g + axx, axy, axy, g + ayy; + *Dp *= f_; } return Point2(u0_ + f_ * u, v0_ + f_ * v); diff --git a/gtsam/geometry/PinholeCamera.h b/gtsam/geometry/PinholeCamera.h index c162171c0..19eb87b5f 100644 --- a/gtsam/geometry/PinholeCamera.h +++ b/gtsam/geometry/PinholeCamera.h @@ -271,6 +271,10 @@ public: */ inline static Point2 project_to_camera(const Point3& P, boost::optional Dpoint = boost::none) { +#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION + if (P.z() <= 0) + throw CheiralityException(); +#endif if (Dpoint) { double d = 1.0 / P.z(), d2 = d * d; *Dpoint = (Matrix(2, 3) << d, 0.0, -P.x() * d2, 0.0, d, -P.y() * d2); @@ -300,40 +304,25 @@ public: // Transform to camera coordinates and check cheirality const Point3 pc = pose_.transform_to(pw); -#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION - if (pc.z() <= 0) - throw CheiralityException(); -#endif - // Project to normalized image coordinates const Point2 pn = project_to_camera(pc); if (Dpose || Dpoint) { - // optimized version of derivatives, see CalibratedCamera.nb const double z = pc.z(), d = 1.0 / z; - const double u = pn.x(), v = pn.y(); - Matrix Dpn_pose(2, 6), Dpn_point(2, 3); - if (Dpose) { - double uv = u * v, uu = u * u, vv = v * v; - Dpn_pose << uv, -1 - uu, v, -d, 0, d * u, 1 + vv, -uv, -u, 0, -d, d * v; - } - if (Dpoint) { - const Matrix R(pose_.rotation().matrix()); - Dpn_point << // - R(0, 0) - u * R(0, 2), R(1, 0) - u * R(1, 2), R(2, 0) - u * R(2, 2), // - /**/R(0, 1) - v * R(0, 2), R(1, 1) - v * R(1, 2), R(2, 1) - v * R(2, 2); - Dpn_point *= d; - } // uncalibration - Matrix Dpi_pn; // 2*2 + Matrix Dpi_pn(2, 2); const Point2 pi = K_.uncalibrate(pn, Dcal, Dpi_pn); // chain the Jacobian matrices - if (Dpose) - *Dpose = Dpi_pn * Dpn_pose; - if (Dpoint) - *Dpoint = Dpi_pn * Dpn_point; + if (Dpose) { + Dpose->resize(2, 6); + calculateDpose(pn, d, Dpi_pn, *Dpose); + } + if (Dpoint) { + Dpoint->resize(2, 3); + calculateDpoint(pn, d, pose_.rotation().matrix(), Dpi_pn, *Dpoint); + } return pi; } else return K_.uncalibrate(pn, Dcal); @@ -391,27 +380,29 @@ public: boost::optional Dcamera = boost::none, boost::optional Dpoint = boost::none) const { + const Point3 pc = pose_.transform_to(pw); + const Point2 pn = project_to_camera(pc); + if (!Dcamera && !Dpoint) { - const Point3 pc = pose_.transform_to(pw); - -#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION - if (pc.z() <= 0) - throw CheiralityException(); -#endif - const Point2 pn = project_to_camera(pc); return K_.uncalibrate(pn); - } + } else { + const double z = pc.z(), d = 1.0 / z; - Matrix Dpose, Dp, Dcal; - const Point2 pi = this->project(pw, Dpose, Dp, Dcal); - if (Dcamera) { - *Dcamera = Matrix(2, this->dim()); - Dcamera->leftCols(pose().dim()) = Dpose; // Jacobian wrt pose3 - Dcamera->rightCols(calibration().dim()) = Dcal; // Jacobian wrt calib + // uncalibration + Matrix Dcal, Dpi_pn(2, 2); + const Point2 pi = K_.uncalibrate(pn, Dcal, Dpi_pn); + + if (Dcamera) { + Dcamera->resize(2, this->dim()); + calculateDpose(pn, d, Dpi_pn, Dcamera->leftCols<6>()); + Dcamera->rightCols(K_.dim()) = Dcal; // Jacobian wrt calib + } + if (Dpoint) { + Dpoint->resize(2, 3); + calculateDpoint(pn, d, pose_.rotation().matrix(), Dpi_pn, *Dpoint); + } + return pi; } - if (Dpoint) - *Dpoint = Dp; - return pi; } /// backproject a 2-dimensional point to a 3-dimensional point at given depth @@ -516,6 +507,50 @@ public: private: + /** + * Calculate Jacobian 
with respect to pose + * @param pn projection in normalized coordinates + * @param d disparity (inverse depth) + * @param Dpi_pn derivative of uncalibrate with respect to pn + * @param Dpose Output argument, can be matrix or block, assumed right size ! + * See http://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html + */ + template + static void calculateDpose(const Point2& pn, double d, const Matrix& Dpi_pn, + Eigen::MatrixBase const & Dpose) { + // optimized version of derivatives, see CalibratedCamera.nb + const double u = pn.x(), v = pn.y(); + double uv = u * v, uu = u * u, vv = v * v; + Eigen::Matrix Dpn_pose; + Dpn_pose << uv, -1 - uu, v, -d, 0, d * u, 1 + vv, -uv, -u, 0, -d, d * v; + assert(Dpose.rows()==2 && Dpose.cols()==6); + const_cast&>(Dpose) = // + Dpi_pn.block<2, 2>(0, 0) * Dpn_pose; + } + + /** + * Calculate Jacobian with respect to point + * @param pn projection in normalized coordinates + * @param d disparity (inverse depth) + * @param Dpi_pn derivative of uncalibrate with respect to pn + * @param Dpoint Output argument, can be matrix or block, assumed right size ! + * See http://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html + */ + template + static void calculateDpoint(const Point2& pn, double d, const Matrix& R, + const Matrix& Dpi_pn, Eigen::MatrixBase const & Dpoint) { + // optimized version of derivatives, see CalibratedCamera.nb + const double u = pn.x(), v = pn.y(); + Eigen::Matrix Dpn_point; + Dpn_point << // + R(0, 0) - u * R(0, 2), R(1, 0) - u * R(1, 2), R(2, 0) - u * R(2, 2), // + /**/R(0, 1) - v * R(0, 2), R(1, 1) - v * R(1, 2), R(2, 1) - v * R(2, 2); + Dpn_point *= d; + assert(Dpoint.rows()==2 && Dpoint.cols()==3); + const_cast&>(Dpoint) = // + Dpi_pn.block<2, 2>(0, 0) * Dpn_point; + } + /// @} /// @name Advanced Interface /// @{ diff --git a/gtsam/geometry/Rot3.cpp b/gtsam/geometry/Rot3.cpp index a6b3c3caf..37aa78a78 100644 --- a/gtsam/geometry/Rot3.cpp +++ b/gtsam/geometry/Rot3.cpp @@ -101,10 +101,9 @@ Unit3 Rot3::operator*(const Unit3& p) const { // see doc/math.lyx, SO(3) section Point3 Rot3::unrotate(const Point3& p, boost::optional H1, boost::optional H2) const { - const Matrix Rt(transpose()); - Point3 q(Rt*p.vector()); // q = Rt*p + Point3 q(transpose()*p.vector()); // q = Rt*p if (H1) *H1 = skewSymmetric(q.x(), q.y(), q.z()); - if (H2) *H2 = Rt; + if (H2) *H2 = transpose(); return q; } @@ -175,6 +174,39 @@ Vector Rot3::quaternion() const { return v; } +/* ************************************************************************* */ +Matrix3 Rot3::rightJacobianExpMapSO3(const Vector3& x) { + // x is the axis-angle representation (exponential coordinates) for a rotation + double normx = norm_2(x); // rotation angle + Matrix3 Jr; + if (normx < 10e-8){ + Jr = Matrix3::Identity(); + } + else{ + const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^ + Jr = Matrix3::Identity() - ((1-cos(normx))/(normx*normx)) * X + + ((normx-sin(normx))/(normx*normx*normx)) * X * X; // right Jacobian + } + return Jr; +} + +/* ************************************************************************* */ +Matrix3 Rot3::rightJacobianExpMapSO3inverse(const Vector3& x) { + // x is the axis-angle representation (exponential coordinates) for a rotation + double normx = norm_2(x); // rotation angle + Matrix3 Jrinv; + + if (normx < 10e-8){ + Jrinv = Matrix3::Identity(); + } + else{ + const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^ + Jrinv = Matrix3::Identity() + + 0.5 * X + (1/(normx*normx) - 
(1+cos(normx))/(2*normx * sin(normx)) ) * X * X; + } + return Jrinv; +} + /* ************************************************************************* */ pair RQ(const Matrix3& A) { diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index 73316052d..c8aeae51b 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -304,6 +304,17 @@ namespace gtsam { /// Left-trivialized derivative inverse of the exponential map static Matrix3 dexpInvL(const Vector3& v); + /** + * Right Jacobian for Exponential map in SO(3) - equation (10.86) and following equations in + * G.S. Chirikjian, "Stochastic Models, Information Theory, and Lie Groups", Volume 2, 2008. + */ + static Matrix3 rightJacobianExpMapSO3(const Vector3& x); + + /** Right Jacobian for Log map in SO(3) - equation (10.86) and following equations in + * G.S. Chirikjian, "Stochastic Models, Information Theory, and Lie Groups", Volume 2, 2008. + */ + static Matrix3 rightJacobianExpMapSO3inverse(const Vector3& x); + /// @} /// @name Group Action on Point3 /// @{ diff --git a/gtsam/geometry/tests/testPinholeCamera.cpp b/gtsam/geometry/tests/testPinholeCamera.cpp index 594db159b..dcf3b2596 100644 --- a/gtsam/geometry/tests/testPinholeCamera.cpp +++ b/gtsam/geometry/tests/testPinholeCamera.cpp @@ -169,38 +169,55 @@ static Point2 project3(const Pose3& pose, const Point3& point, const Cal3_S2& ca } /* ************************************************************************* */ -static Point2 projectInfinity3(const Pose3& pose, const Point2& point2D, const Cal3_S2& cal) { - Point3 point(point2D.x(), point2D.y(), 1.0); - return Camera(pose,cal).projectPointAtInfinity(point); -} - -/* ************************************************************************* */ -TEST( PinholeCamera, Dproject_point_pose) +TEST( PinholeCamera, Dproject) { Matrix Dpose, Dpoint, Dcal; Point2 result = camera.project(point1, Dpose, Dpoint, Dcal); Matrix numerical_pose = numericalDerivative31(project3, pose1, point1, K); Matrix numerical_point = numericalDerivative32(project3, pose1, point1, K); Matrix numerical_cal = numericalDerivative33(project3, pose1, point1, K); - CHECK(assert_equal(result, Point2(-100, 100) )); - CHECK(assert_equal(Dpose, numerical_pose, 1e-7)); - CHECK(assert_equal(Dpoint, numerical_point,1e-7)); - CHECK(assert_equal(Dcal, numerical_cal,1e-7)); + CHECK(assert_equal(Point2(-100, 100), result)); + CHECK(assert_equal(numerical_pose, Dpose, 1e-7)); + CHECK(assert_equal(numerical_point, Dpoint, 1e-7)); + CHECK(assert_equal(numerical_cal, Dcal, 1e-7)); } /* ************************************************************************* */ -TEST( PinholeCamera, Dproject_point_pose_Infinity) +static Point2 projectInfinity3(const Pose3& pose, const Point2& point2D, const Cal3_S2& cal) { + Point3 point(point2D.x(), point2D.y(), 1.0); + return Camera(pose,cal).projectPointAtInfinity(point); +} + +/* ************************************************************************* */ +//TEST( PinholeCamera, Dproject_Infinity) +//{ +// Matrix Dpose, Dpoint, Dcal; +// Point2 point2D(-0.08,-0.08); +// Point3 point3D(point1.x(), point1.y(), 1.0); +// Point2 result = camera.projectPointAtInfinity(point3D, Dpose, Dpoint, Dcal); +// Matrix numerical_pose = numericalDerivative31(projectInfinity3, pose1, point2D, K); +// Matrix numerical_point = numericalDerivative32(projectInfinity3, pose1, point2D, K); +// Matrix numerical_cal = numericalDerivative33(projectInfinity3, pose1, point2D, K); +// CHECK(assert_equal(numerical_pose, Dpose, 1e-7)); +// 
CHECK(assert_equal(numerical_point, Dpoint, 1e-7)); +// CHECK(assert_equal(numerical_cal, Dcal, 1e-7)); +//} + +/* ************************************************************************* */ +static Point2 project4(const Camera& camera, const Point3& point) { + return camera.project2(point); +} + +/* ************************************************************************* */ +TEST( PinholeCamera, Dproject2) { - Matrix Dpose, Dpoint, Dcal; - Point2 point2D(-0.08,-0.08); - Point3 point3D(point1.x(), point1.y(), 1.0); - Point2 result = camera.projectPointAtInfinity(point3D, Dpose, Dpoint, Dcal); - Matrix numerical_pose = numericalDerivative31(projectInfinity3, pose1, point2D, K); - Matrix numerical_point = numericalDerivative32(projectInfinity3, pose1, point2D, K); - Matrix numerical_cal = numericalDerivative33(projectInfinity3, pose1, point2D, K); - CHECK(assert_equal(Dpose, numerical_pose, 1e-7)); - CHECK(assert_equal(Dpoint, numerical_point,1e-7)); - CHECK(assert_equal(Dcal, numerical_cal,1e-7)); + Matrix Dcamera, Dpoint; + Point2 result = camera.project2(point1, Dcamera, Dpoint); + Matrix numerical_camera = numericalDerivative21(project4, camera, point1); + Matrix numerical_point = numericalDerivative22(project4, camera, point1); + CHECK(assert_equal(result, Point2(-100, 100) )); + CHECK(assert_equal(numerical_camera, Dcamera, 1e-7)); + CHECK(assert_equal(numerical_point, Dpoint, 1e-7)); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testRot3M.cpp b/gtsam/geometry/tests/testRot3M.cpp index 7b80e8ee9..f0e60ba03 100644 --- a/gtsam/geometry/tests/testRot3M.cpp +++ b/gtsam/geometry/tests/testRot3M.cpp @@ -200,6 +200,37 @@ TEST(Rot3, log) CHECK_OMEGA_ZERO(x*2.*PI,y*2.*PI,z*2.*PI) } +Rot3 evaluateRotation(const Vector3 thetahat){ + return Rot3::Expmap(thetahat); +} + +Vector3 evaluateLogRotation(const Vector3 thetahat, const Vector3 deltatheta){ + return Rot3::Logmap( Rot3::Expmap(thetahat).compose( Rot3::Expmap(deltatheta) ) ); +} + +/* ************************************************************************* */ +TEST( Rot3, rightJacobianExpMapSO3 ) +{ + // Linearization point + Vector3 thetahat; thetahat << 0.1, 0, 0; + + Matrix expectedJacobian = numericalDerivative11(boost::bind(&evaluateRotation, _1), LieVector(thetahat)); + Matrix actualJacobian = Rot3::rightJacobianExpMapSO3(thetahat); + EXPECT(assert_equal(expectedJacobian, actualJacobian)); +} + +/* ************************************************************************* */ +TEST( Rot3, rightJacobianExpMapSO3inverse ) +{ + // Linearization point + Vector3 thetahat; thetahat << 0.1,0.1,0; ///< Current estimate of rotation rate bias + Vector3 deltatheta; deltatheta << 0, 0, 0; + + Matrix expectedJacobian = numericalDerivative11(boost::bind(&evaluateLogRotation, thetahat, _1), LieVector(deltatheta)); + Matrix actualJacobian = Rot3::rightJacobianExpMapSO3inverse(thetahat); + EXPECT(assert_equal(expectedJacobian, actualJacobian)); +} + /* ************************************************************************* */ TEST(Rot3, manifold_caley) { diff --git a/gtsam/linear/GaussianFactor.h b/gtsam/linear/GaussianFactor.h index 969529c78..5fb2e15bd 100644 --- a/gtsam/linear/GaussianFactor.h +++ b/gtsam/linear/GaussianFactor.h @@ -96,6 +96,12 @@ namespace gtsam { */ virtual Matrix information() const = 0; + /// Return the diagonal of the Hessian for this factor + virtual VectorValues hessianDiagonal() const = 0; + + /// Return the block diagonal of the Hessian for this factor 
+ virtual std::map hessianBlockDiagonal() const = 0; + /** Clone a factor (make a deep copy) */ virtual GaussianFactor::shared_ptr clone() const = 0; @@ -112,6 +118,12 @@ namespace gtsam { /// y += alpha * A'*A*x virtual void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const = 0; + /// y += alpha * A'*A*x + virtual void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector keys) const = 0; + + /// y += alpha * A'*A*x + virtual void multiplyHessianAdd(double alpha, const double* x, double* y) const = 0; + /// A'*b for Jacobian, eta for Hessian virtual VectorValues gradientAtZero() const = 0; diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp index 7d0913d86..b56270a55 100644 --- a/gtsam/linear/GaussianFactorGraph.cpp +++ b/gtsam/linear/GaussianFactorGraph.cpp @@ -54,6 +54,27 @@ namespace gtsam { return keys; } + /* ************************************************************************* */ + vector GaussianFactorGraph::getkeydim() const { + // First find dimensions of each variable + vector dims; + BOOST_FOREACH(const sharedFactor& factor, *this) { + for (GaussianFactor::const_iterator pos = factor->begin(); + pos != factor->end(); ++pos) { + if (dims.size() <= *pos) + dims.resize(*pos + 1, 0); + dims[*pos] = factor->getDim(pos); + } + } + // Find accumulated dimensions for variables + vector dims_accumulated; + dims_accumulated.resize(dims.size()+1,0); + dims_accumulated[0]=0; + for (int i=1; i > GaussianFactorGraph::sparseJacobian() const { + vector > GaussianFactorGraph::sparseJacobian() const { // First find dimensions of each variable vector dims; BOOST_FOREACH(const sharedFactor& factor, *this) { - for(GaussianFactor::const_iterator pos = factor->begin(); pos != factor->end(); ++pos) { - if(dims.size() <= *pos) + for (GaussianFactor::const_iterator pos = factor->begin(); + pos != factor->end(); ++pos) { + if (dims.size() <= *pos) dims.resize(*pos + 1, 0); dims[*pos] = factor->getDim(pos); } } // Compute first scalar column of each variable - vector columnIndices(dims.size()+1, 0); - for(size_t j=1; j columnIndices(dims.size() + 1, 0); + for (size_t j = 1; j < dims.size() + 1; ++j) + columnIndices[j] = columnIndices[j - 1] + dims[j - 1]; // Iterate over all factors, adding sparse scalar entries typedef boost::tuple triplet; @@ -104,7 +126,8 @@ namespace gtsam { JacobianFactor::shared_ptr jacobianFactor( boost::dynamic_pointer_cast(factor)); if (!jacobianFactor) { - HessianFactor::shared_ptr hessian(boost::dynamic_pointer_cast(factor)); + HessianFactor::shared_ptr hessian( + boost::dynamic_pointer_cast(factor)); if (hessian) jacobianFactor.reset(new JacobianFactor(*hessian)); else @@ -115,22 +138,23 @@ namespace gtsam { // Whiten the factor and add entries for it // iterate over all variables in the factor const JacobianFactor whitened(jacobianFactor->whiten()); - for(JacobianFactor::const_iterator pos=whitened.begin(); pos 1e-12) entries.push_back( - boost::make_tuple(row+i, column_start+j, s)); + double s = whitenedA(i, j); + if (std::abs(s) > 1e-12) + entries.push_back(boost::make_tuple(row + i, column_start + j, s)); } } JacobianFactor::constBVector whitenedb(whitened.getb()); size_t bcolumn = columnIndices.back(); for (size_t i = 0; i < (size_t) whitenedb.size(); i++) - entries.push_back(boost::make_tuple(row+i, bcolumn, whitenedb(i))); + entries.push_back(boost::make_tuple(row + i, bcolumn, whitenedb(i))); // Increment row index row += jacobianFactor->rows(); @@ -143,7 +167,7 @@ namespace gtsam 
{ // call sparseJacobian typedef boost::tuple triplet; - std::vector result = sparseJacobian(); + vector result = sparseJacobian(); // translate to base 1 matrix size_t nzmax = result.size(); @@ -158,22 +182,24 @@ namespace gtsam { } /* ************************************************************************* */ - Matrix GaussianFactorGraph::augmentedJacobian(boost::optional optionalOrdering) const { + Matrix GaussianFactorGraph::augmentedJacobian( + boost::optional optionalOrdering) const { // combine all factors JacobianFactor combined(*this, optionalOrdering); return combined.augmentedJacobian(); } /* ************************************************************************* */ - std::pair GaussianFactorGraph::jacobian(boost::optional optionalOrdering) const { + pair GaussianFactorGraph::jacobian( + boost::optional optionalOrdering) const { Matrix augmented = augmentedJacobian(optionalOrdering); - return make_pair( - augmented.leftCols(augmented.cols()-1), - augmented.col(augmented.cols()-1)); + return make_pair(augmented.leftCols(augmented.cols() - 1), + augmented.col(augmented.cols() - 1)); } /* ************************************************************************* */ - Matrix GaussianFactorGraph::augmentedHessian(boost::optional optionalOrdering) const { + Matrix GaussianFactorGraph::augmentedHessian( + boost::optional optionalOrdering) const { // combine all factors and get upper-triangular part of Hessian HessianFactor combined(*this, Scatter(*this, optionalOrdering)); Matrix result = combined.info(); @@ -183,13 +209,44 @@ namespace gtsam { } /* ************************************************************************* */ - std::pair GaussianFactorGraph::hessian(boost::optional optionalOrdering) const { + pair GaussianFactorGraph::hessian( + boost::optional optionalOrdering) const { Matrix augmented = augmentedHessian(optionalOrdering); return make_pair( - augmented.topLeftCorner(augmented.rows()-1, augmented.rows()-1), - augmented.col(augmented.rows()-1).head(augmented.rows()-1)); + augmented.topLeftCorner(augmented.rows() - 1, augmented.rows() - 1), + augmented.col(augmented.rows() - 1).head(augmented.rows() - 1)); } - + + /* ************************************************************************* */ + VectorValues GaussianFactorGraph::hessianDiagonal() const { + VectorValues d; + BOOST_FOREACH(const sharedFactor& factor, *this) { + if(factor){ + VectorValues di = factor->hessianDiagonal(); + d.addInPlace_(di); + } + } + return d; + } + + /* ************************************************************************* */ + map GaussianFactorGraph::hessianBlockDiagonal() const { + map blocks; + BOOST_FOREACH(const sharedFactor& factor, *this) { + map BD = factor->hessianBlockDiagonal(); + map::const_iterator it = BD.begin(); + for(;it!=BD.end();it++) { + Key j = it->first; // variable key for this block + const Matrix& Bj = it->second; + if (blocks.count(j)) + blocks[j] += Bj; + else + blocks.insert(make_pair(j,Bj)); + } + } + return blocks; + } + /* ************************************************************************* */ VectorValues GaussianFactorGraph::optimize(OptionalOrdering ordering, const Eliminate& function) const { @@ -201,9 +258,9 @@ namespace gtsam { namespace { JacobianFactor::shared_ptr convertToJacobianFactorPtr(const GaussianFactor::shared_ptr &gf) { JacobianFactor::shared_ptr result = boost::dynamic_pointer_cast(gf); - if( !result ) { - result = boost::make_shared(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. 
Hessian -> Jacobian with Cholesky) - } + if( !result ) + // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky) + result = boost::make_shared(*gf); return result; } } @@ -274,7 +331,16 @@ namespace gtsam { void GaussianFactorGraph::multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const { BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this) - f->multiplyHessianAdd(alpha, x, y); + f->multiplyHessianAdd(alpha, x, y); + } + + /* ************************************************************************* */ + void GaussianFactorGraph::multiplyHessianAdd(double alpha, + const double* x, double* y) const { + vector FactorKeys = getkeydim(); + BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this) + f->multiplyHessianAdd(alpha, x, y, FactorKeys); + } /* ************************************************************************* */ diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h index e12c892de..267792f33 100644 --- a/gtsam/linear/GaussianFactorGraph.h +++ b/gtsam/linear/GaussianFactorGraph.h @@ -135,11 +135,15 @@ namespace gtsam { typedef FastSet Keys; Keys keys() const; + std::vector getkeydim() const; + /** unnormalized error */ double error(const VectorValues& x) const { double total_error = 0.; - BOOST_FOREACH(const sharedFactor& factor, *this) - total_error += factor->error(x); + BOOST_FOREACH(const sharedFactor& factor, *this){ + if(factor) + total_error += factor->error(x); + } return total_error; } @@ -219,6 +223,12 @@ namespace gtsam { */ std::pair hessian(boost::optional optionalOrdering = boost::none) const; + /** Return only the diagonal of the Hessian A'*A, as a VectorValues */ + VectorValues hessianDiagonal() const; + + /** Return the block diagonal of the Hessian for this factor */ + std::map hessianBlockDiagonal() const; + /** Solve the factor graph by performing multifrontal variable elimination in COLAMD order using * the dense elimination function specified in \c function (default EliminatePreferCholesky), * followed by back-substitution in the Bayes tree resulting from elimination. Is equivalent @@ -288,6 +298,10 @@ namespace gtsam { void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const; + ///** y += alpha*A'A*x */ + void multiplyHessianAdd(double alpha, const double* x, + double* y) const; + ///** In-place version e <- A*x that overwrites e. */ void multiplyInPlace(const VectorValues& x, Errors& e) const; diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp index 0b1d22dd4..9d2c5192e 100644 --- a/gtsam/linear/HessianFactor.cpp +++ b/gtsam/linear/HessianFactor.cpp @@ -61,41 +61,44 @@ string SlotEntry::toString() const { } /* ************************************************************************* */ -Scatter::Scatter(const GaussianFactorGraph& gfg, boost::optional ordering) -{ +Scatter::Scatter(const GaussianFactorGraph& gfg, + boost::optional ordering) { gttic(Scatter_Constructor); static const size_t none = std::numeric_limits::max(); // First do the set union. 
BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, gfg) { - if(factor) { - for(GaussianFactor::const_iterator variable = factor->begin(); variable != factor->end(); ++variable) { + if (factor) { + for (GaussianFactor::const_iterator variable = factor->begin(); + variable != factor->end(); ++variable) { // TODO: Fix this hack to cope with zero-row Jacobians that come from BayesTreeOrphanWrappers - const JacobianFactor* asJacobian = dynamic_cast(factor.get()); - if(!asJacobian || asJacobian->cols() > 1) - this->insert(make_pair(*variable, SlotEntry(none, factor->getDim(variable)))); + const JacobianFactor* asJacobian = + dynamic_cast(factor.get()); + if (!asJacobian || asJacobian->cols() > 1) + this->insert( + make_pair(*variable, SlotEntry(none, factor->getDim(variable)))); } } } // If we have an ordering, pre-fill the ordered variables first size_t slot = 0; - if(ordering) { + if (ordering) { BOOST_FOREACH(Key key, *ordering) { const_iterator entry = find(key); - if(entry == end()) + if (entry == end()) throw std::invalid_argument( "The ordering provided to the HessianFactor Scatter constructor\n" - "contained extra variables that did not appear in the factors to combine."); - at(key).slot = (slot ++); + "contained extra variables that did not appear in the factors to combine."); + at(key).slot = (slot++); } } // Next fill in the slot indices (we can only get these after doing the set // union. BOOST_FOREACH(value_type& var_slot, *this) { - if(var_slot.second.slot == none) - var_slot.second.slot = (slot ++); + if (var_slot.second.slot == none) + var_slot.second.slot = (slot++); } } @@ -343,6 +346,30 @@ Matrix HessianFactor::information() const return info_.range(0, this->size(), 0, this->size()).selfadjointView(); } +/* ************************************************************************* */ +VectorValues HessianFactor::hessianDiagonal() const { + VectorValues d; + // Loop over all variables + for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) { + // Get the diagonal block, and insert its diagonal + Matrix B = info_(j, j).selfadjointView(); + d.insert(keys_[j],B.diagonal()); + } + return d; +} + +/* ************************************************************************* */ +map HessianFactor::hessianBlockDiagonal() const { + map blocks; + // Loop over all variables + for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) { + // Get the diagonal block, and insert it + Matrix B = info_(j, j).selfadjointView(); + blocks.insert(make_pair(keys_[j],B)); + } + return blocks; +} + /* ************************************************************************* */ Matrix HessianFactor::augmentedJacobian() const { @@ -494,7 +521,7 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x, // copy to yvalues for(DenseIndex i = 0; i < (DenseIndex)size(); ++i) { - bool didNotExist; + bool didNotExist; VectorValues::iterator it; boost::tie(it, didNotExist) = yvalues.tryInsert(keys_[i], Vector()); if (didNotExist) @@ -504,6 +531,48 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x, } } +/* ************************************************************************* */ +void HessianFactor::multiplyHessianAdd(double alpha, const double* x, + double* yvalues, vector offsets) const { + + // Use eigen magic to access raw memory + typedef Eigen::Matrix DVector; + typedef Eigen::Map DMap; + typedef Eigen::Map ConstDMap; + + // Create a vector of temporary y values, corresponding to rows i + vector y; + y.reserve(size()); + for (const_iterator it = begin(); it != 
end(); it++) + y.push_back(zero(getDim(it))); + + // Accessing the VectorValues one by one is expensive + // So we will loop over columns to access x only once per column + // And fill the above temporary y values, to be added into yvalues after + for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) { + DenseIndex i = 0; + for (; i < j; ++i) + y[i] += info_(i, j).knownOffDiagonal() + * ConstDMap(x + offsets[keys_[j]], + offsets[keys_[j] + 1] - offsets[keys_[j]]); + // blocks on the diagonal are only half + y[i] += info_(j, j).selfadjointView() + * ConstDMap(x + offsets[keys_[j]], + offsets[keys_[j] + 1] - offsets[keys_[j]]); + // for below diagonal, we take transpose block from upper triangular part + for (i = j + 1; i < (DenseIndex) size(); ++i) + y[i] += info_(i, j).knownOffDiagonal() + * ConstDMap(x + offsets[keys_[j]], + offsets[keys_[j] + 1] - offsets[keys_[j]]); + } + + // copy to yvalues + for (DenseIndex i = 0; i < (DenseIndex) size(); ++i) + DMap(yvalues + offsets[keys_[i]], offsets[keys_[i] + 1] - offsets[keys_[i]]) += + alpha * y[i]; +} + + /* ************************************************************************* */ VectorValues HessianFactor::gradientAtZero() const { VectorValues g; diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h index a3a38e4dc..f9f8c0d26 100644 --- a/gtsam/linear/HessianFactor.h +++ b/gtsam/linear/HessianFactor.h @@ -41,21 +41,28 @@ namespace gtsam { GTSAM_EXPORT std::pair, boost::shared_ptr > EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys); - // Definition of Scatter, which is an intermediate data structure used when building a - // HessianFactor incrementally, to get the keys in the right order. The "scatter" is a map from - // global variable indices to slot indices in the union of involved variables. We also include - // the dimensionality of the variable. + /** + * One SlotEntry stores the slot index for a variable, as well its dimension. + */ struct GTSAM_EXPORT SlotEntry { - size_t slot; - size_t dimension; + size_t slot, dimension; SlotEntry(size_t _slot, size_t _dimension) : slot(_slot), dimension(_dimension) {} std::string toString() const; }; - class Scatter : public FastMap { + + /** + * Scatter is an intermediate data structure used when building a HessianFactor + * incrementally, to get the keys in the right order. The "scatter" is a map from + * global variable indices to slot indices in the union of involved variables. + * We also include the dimensionality of the variable. + */ + class Scatter: public FastMap { public: - Scatter() {} - Scatter(const GaussianFactorGraph& gfg, boost::optional ordering = boost::none); + Scatter() { + } + Scatter(const GaussianFactorGraph& gfg, + boost::optional ordering = boost::none); }; /** @@ -134,6 +141,7 @@ namespace gtsam { typedef SymmetricBlockMatrix::Block Block; ///< A block from the Hessian matrix typedef SymmetricBlockMatrix::constBlock constBlock; ///< A block from the Hessian matrix (const version) + /** default constructor for I/O */ HessianFactor(); @@ -328,7 +336,13 @@ namespace gtsam { * GaussianFactor. 
*/ virtual Matrix information() const; - + + /// Return the diagonal of the Hessian for this factor + virtual VectorValues hessianDiagonal() const; + + /// Return the block diagonal of the Hessian for this factor + virtual std::map hessianBlockDiagonal() const; + /** * Return (dense) matrix associated with factor * @param ordering of variables needed for matrix column order @@ -363,6 +377,10 @@ namespace gtsam { /** y += alpha * A'*A*x */ void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const; + void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector keys) const; + + void multiplyHessianAdd(double alpha, const double* x, double* y) const {}; + /// eta for Hessian VectorValues gradientAtZero() const; diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index 1468810e6..129e832e4 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -131,7 +131,7 @@ namespace gtsam { Matrix::Zero(maxrank, Ab_.matrix().cols()); // FIXME: replace with triangular system Ab_.rowEnd() = maxrank; - model_ = noiseModel::Unit::Create(maxrank); + model_ = SharedDiagonal(); // should be same as Unit::Create(maxrank); } /* ************************************************************************* */ @@ -438,6 +438,37 @@ namespace gtsam { } } + /* ************************************************************************* */ + VectorValues JacobianFactor::hessianDiagonal() const { + VectorValues d; + for(size_t pos=0; poswhiten(column_k); + dj(k) = dot(column_k,column_k); + } + d.insert(j,dj); + } + return d; + } + + /* ************************************************************************* */ + map JacobianFactor::hessianBlockDiagonal() const { + map blocks; + for(size_t pos=0; posWhiten(Aj); + blocks.insert(make_pair(j,Aj.transpose()*Aj)); + } + return blocks; + } + /* ************************************************************************* */ Vector JacobianFactor::operator*(const VectorValues& x) const { Vector Ax = zero(Ab_.rows()); @@ -458,11 +489,13 @@ namespace gtsam { // Just iterate over all A matrices and insert Ai^e into VectorValues for(size_t pos=0; pos xi = x.tryInsert(keys_[pos], Vector()); + pair xi = x.tryInsert(j, Vector()); if(xi.second) xi.first->second = Vector::Zero(getDim(begin() + pos)); gtsam::transposeMultiplyAdd(Ab_(pos), E, xi.first->second); + } } @@ -473,6 +506,32 @@ namespace gtsam { transposeMultiplyAdd(alpha,Ax,y); } + void JacobianFactor::multiplyHessianAdd(double alpha, const double* x, double* y, std::vector keys) const { + + // Use eigen magic to access raw memory + typedef Eigen::Matrix DVector; + typedef Eigen::Map DMap; + typedef Eigen::Map ConstDMap; + + if (empty()) return; + Vector Ax = zero(Ab_.rows()); + + // Just iterate over all A matrices and multiply in correct config part + for(size_t pos=0; poswhitenInPlace(Ax); model_->whitenInPlace(Ax); } + + // multiply with alpha + Ax *= alpha; + + // Again iterate over all A matrices and insert Ai^e into y + for(size_t pos=0; pos hessianBlockDiagonal() const; + /** * @brief Returns (dense) A,b pair associated with factor, bakes in the weights */ @@ -269,6 +276,10 @@ namespace gtsam { /** y += alpha * A'*A*x */ void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const; + void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector keys) const; + + void multiplyHessianAdd(double alpha, const double* x, double* y) const {}; + /// A'*b for Jacobian VectorValues gradientAtZero() const; 
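
[Review note, not part of the patch] The GaussianFactor / JacobianFactor / HessianFactor / GaussianFactorGraph hunks above add hessianDiagonal(), hessianBlockDiagonal(), and a raw-memory multiplyHessianAdd(double alpha, const double* x, double* y) overload operating on contiguous double arrays laid out according to the new getkeydim() offsets. Below is a minimal usage sketch modeled on the new multiplyHessianAdd3 unit test; the helper name hessianTimesX is illustrative only, and it assumes the dense hessian() ordering coincides with the key order used for the raw arrays, which holds for the simple graphs in the new tests.

#include <gtsam/linear/GaussianFactorGraph.h>
#include <boost/tuple/tuple.hpp>
#include <cassert>

using namespace gtsam;

// Illustrative helper: multiply the Hessian A'A of a factor graph with a raw
// vector x using the new double* overload, and cross-check against the dense
// Hessian. x must be laid out contiguously in key order (offsets as returned
// by the new getkeydim() helper), as in the multiplyHessianAdd3 test.
Vector hessianTimesX(const GaussianFactorGraph& gfg, const Vector& X) {
  // Reference product via the dense augmented Hessian
  Matrix AtA; Vector eta;
  boost::tie(AtA, eta) = gfg.hessian();
  Vector expected = AtA * X;

  // Fast product via the raw-memory interface; y accumulates alpha * A'A * x
  Vector y = Vector::Zero(X.size());
  gfg.multiplyHessianAdd(1.0, X.data(), y.data());

  assert(expected.isApprox(y, 1e-9));  // both paths should agree
  return y;
}
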
diff --git a/gtsam/linear/KalmanFilter.cpp b/gtsam/linear/KalmanFilter.cpp index 6e413f846..68e6a00f1 100644 --- a/gtsam/linear/KalmanFilter.cpp +++ b/gtsam/linear/KalmanFilter.cpp @@ -53,7 +53,7 @@ KalmanFilter::solve(const GaussianFactorGraph& factorGraph) const { /* ************************************************************************* */ // Auxiliary function to create a small graph for predict or update and solve KalmanFilter::State // -KalmanFilter::fuse(const State& p, GaussianFactor::shared_ptr newFactor) { +KalmanFilter::fuse(const State& p, GaussianFactor::shared_ptr newFactor) const { // Create a factor graph GaussianFactorGraph factorGraph; @@ -65,7 +65,7 @@ KalmanFilter::fuse(const State& p, GaussianFactor::shared_ptr newFactor) { /* ************************************************************************* */ KalmanFilter::State KalmanFilter::init(const Vector& x0, - const SharedDiagonal& P0) { + const SharedDiagonal& P0) const { // Create a factor graph f(x0), eliminate it into P(x0) GaussianFactorGraph factorGraph; @@ -74,7 +74,7 @@ KalmanFilter::State KalmanFilter::init(const Vector& x0, } /* ************************************************************************* */ -KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) { +KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) const { // Create a factor graph f(x0), eliminate it into P(x0) GaussianFactorGraph factorGraph; @@ -89,7 +89,7 @@ void KalmanFilter::print(const string& s) const { /* ************************************************************************* */ KalmanFilter::State KalmanFilter::predict(const State& p, const Matrix& F, - const Matrix& B, const Vector& u, const SharedDiagonal& model) { + const Matrix& B, const Vector& u, const SharedDiagonal& model) const { // The factor related to the motion model is defined as // f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T @@ -100,7 +100,7 @@ KalmanFilter::State KalmanFilter::predict(const State& p, const Matrix& F, /* ************************************************************************* */ KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F, - const Matrix& B, const Vector& u, const Matrix& Q) { + const Matrix& B, const Vector& u, const Matrix& Q) const { #ifndef NDEBUG DenseIndex n = F.cols(); @@ -126,7 +126,7 @@ KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F, /* ************************************************************************* */ KalmanFilter::State KalmanFilter::predict2(const State& p, const Matrix& A0, - const Matrix& A1, const Vector& b, const SharedDiagonal& model) { + const Matrix& A1, const Vector& b, const SharedDiagonal& model) const { // Nhe factor related to the motion model is defined as // f2(x_{t},x_{t+1}) = |A0*x_{t} + A1*x_{t+1} - b|^2 Key k = step(p); @@ -135,7 +135,7 @@ KalmanFilter::State KalmanFilter::predict2(const State& p, const Matrix& A0, /* ************************************************************************* */ KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H, - const Vector& z, const SharedDiagonal& model) { + const Vector& z, const SharedDiagonal& model) const { // The factor related to the measurements would be defined as // f2 = (h(x_{t}) - z_{t}) * R^-1 * (h(x_{t}) - z_{t})^T // = (x_{t} - z_{t}) * R^-1 * (x_{t} - z_{t})^T @@ -145,7 +145,7 @@ KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H, /* 
************************************************************************* */ KalmanFilter::State KalmanFilter::updateQ(const State& p, const Matrix& H, - const Vector& z, const Matrix& Q) { + const Vector& z, const Matrix& Q) const { Key k = step(p); Matrix M = inverse(Q), Ht = trans(H); Matrix G = Ht * M * H; diff --git a/gtsam/linear/KalmanFilter.h b/gtsam/linear/KalmanFilter.h index 7c738d639..63271401c 100644 --- a/gtsam/linear/KalmanFilter.h +++ b/gtsam/linear/KalmanFilter.h @@ -62,7 +62,7 @@ private: const GaussianFactorGraph::Eliminate function_; /** algorithm */ State solve(const GaussianFactorGraph& factorGraph) const; - State fuse(const State& p, GaussianFactor::shared_ptr newFactor); + State fuse(const State& p, GaussianFactor::shared_ptr newFactor) const; public: @@ -80,10 +80,10 @@ public: * @param x0 estimate at time 0 * @param P0 covariance at time 0, given as a diagonal Gaussian 'model' */ - State init(const Vector& x0, const SharedDiagonal& P0); + State init(const Vector& x0, const SharedDiagonal& P0) const; /// version of init with a full covariance matrix - State init(const Vector& x0, const Matrix& P0); + State init(const Vector& x0, const Matrix& P0) const; /// print void print(const std::string& s = "") const; @@ -102,7 +102,7 @@ public: * and w is zero-mean, Gaussian white noise with covariance Q. */ State predict(const State& p, const Matrix& F, const Matrix& B, - const Vector& u, const SharedDiagonal& modelQ); + const Vector& u, const SharedDiagonal& modelQ) const; /* * Version of predict with full covariance @@ -111,7 +111,7 @@ public: * This version allows more realistic models than a diagonal covariance matrix. */ State predictQ(const State& p, const Matrix& F, const Matrix& B, - const Vector& u, const Matrix& Q); + const Vector& u, const Matrix& Q) const; /** * Predict the state P(x_{t+1}|Z^t) @@ -122,7 +122,7 @@ public: * with an optional noise model. */ State predict2(const State& p, const Matrix& A0, const Matrix& A1, - const Vector& b, const SharedDiagonal& model); + const Vector& b, const SharedDiagonal& model) const; /** * Update Kalman filter with a measurement @@ -133,7 +133,7 @@ public: * In this version, R is restricted to diagonal Gaussians (model parameter) */ State update(const State& p, const Matrix& H, const Vector& z, - const SharedDiagonal& model); + const SharedDiagonal& model) const; /* * Version of update with full covariance @@ -142,7 +142,7 @@ public: * This version allows more realistic models than a diagonal covariance matrix. 
*/ State updateQ(const State& p, const Matrix& H, const Vector& z, - const Matrix& Q); + const Matrix& Q) const; }; } // \namespace gtsam diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 04c26336f..a87f426fa 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -585,6 +585,10 @@ namespace gtsam { virtual Matrix Whiten(const Matrix& H) const { return H; } virtual void WhitenInPlace(Matrix& H) const {} virtual void WhitenInPlace(Eigen::Block H) const {} + virtual void whitenInPlace(Vector& v) const {} + virtual void unwhitenInPlace(Vector& v) const {} + virtual void whitenInPlace(Eigen::Block& v) const {} + virtual void unwhitenInPlace(Eigen::Block& v) const {} private: /** Serialization function */ diff --git a/gtsam/linear/tests/testGaussianFactorGraphUnordered.cpp b/gtsam/linear/tests/testGaussianFactorGraphUnordered.cpp index 6123b0071..13038bcc2 100644 --- a/gtsam/linear/tests/testGaussianFactorGraphUnordered.cpp +++ b/gtsam/linear/tests/testGaussianFactorGraphUnordered.cpp @@ -112,36 +112,51 @@ TEST(GaussianFactorGraph, matrices) { // 9 10 0 11 12 13 // 0 0 0 14 15 16 + Matrix A00 = (Matrix(2, 3) << 1, 2, 3, 5, 6, 7); + Matrix A10 = (Matrix(2, 3) << 9, 10, 0, 0, 0, 0); + Matrix A11 = (Matrix(2, 2) << 11, 12, 14, 15); + GaussianFactorGraph gfg; SharedDiagonal model = noiseModel::Unit::Create(2); - gfg.add(0, (Matrix(2, 3) << 1., 2., 3., 5., 6., 7.), (Vector(2) << 4., 8.), model); - gfg.add(0, (Matrix(2, 3) << 9.,10., 0., 0., 0., 0.), 1, (Matrix(2, 2) << 11., 12., 14., 15.), (Vector(2) << 13.,16.), model); + gfg.add(0, A00, (Vector(2) << 4., 8.), model); + gfg.add(0, A10, 1, A11, (Vector(2) << 13.,16.), model); - Matrix jacobian(4,6); - jacobian << + Matrix Ab(4,6); + Ab << 1, 2, 3, 0, 0, 4, 5, 6, 7, 0, 0, 8, 9,10, 0,11,12,13, 0, 0, 0,14,15,16; - Matrix expectedJacobian = jacobian; - Matrix expectedHessian = jacobian.transpose() * jacobian; - Matrix expectedA = jacobian.leftCols(jacobian.cols()-1); - Vector expectedb = jacobian.col(jacobian.cols()-1); - Matrix expectedL = expectedA.transpose() * expectedA; - Vector expectedeta = expectedA.transpose() * expectedb; + // augmented versions + EXPECT(assert_equal(Ab, gfg.augmentedJacobian())); + EXPECT(assert_equal(Ab.transpose() * Ab, gfg.augmentedHessian())); - Matrix actualJacobian = gfg.augmentedJacobian(); - Matrix actualHessian = gfg.augmentedHessian(); + // jacobian + Matrix A = Ab.leftCols(Ab.cols()-1); + Vector b = Ab.col(Ab.cols()-1); Matrix actualA; Vector actualb; boost::tie(actualA,actualb) = gfg.jacobian(); - Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian(); + EXPECT(assert_equal(A, actualA)); + EXPECT(assert_equal(b, actualb)); - EXPECT(assert_equal(expectedJacobian, actualJacobian)); - EXPECT(assert_equal(expectedHessian, actualHessian)); - EXPECT(assert_equal(expectedA, actualA)); - EXPECT(assert_equal(expectedb, actualb)); - EXPECT(assert_equal(expectedL, actualL)); - EXPECT(assert_equal(expectedeta, actualeta)); + // hessian + Matrix L = A.transpose() * A; + Vector eta = A.transpose() * b; + Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian(); + EXPECT(assert_equal(L, actualL)); + EXPECT(assert_equal(eta, actualeta)); + + // hessianBlockDiagonal + VectorValues expectLdiagonal; // Make explicit that diagonal is sum-squares of columns + expectLdiagonal.insert(0, (Vector(3) << 1+25+81, 4+36+100, 9+49)); + expectLdiagonal.insert(1, (Vector(2) << 121+196, 144+225)); + EXPECT(assert_equal(expectLdiagonal, 
gfg.hessianDiagonal())); + + // hessianBlockDiagonal + map actualBD = gfg.hessianBlockDiagonal(); + LONGS_EQUAL(2,actualBD.size()); + EXPECT(assert_equal(A00.transpose()*A00 + A10.transpose()*A10,actualBD[0])); + EXPECT(assert_equal(A11.transpose()*A11,actualBD[1])); } /* ************************************************************************* */ @@ -151,9 +166,9 @@ static GaussianFactorGraph createSimpleGaussianFactorGraph() { // linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_] fg += JacobianFactor(2, 10*eye(2), -1.0*ones(2), unit2); // odometry between x1 and x2: x2-x1=[0.2;-0.1] - fg += JacobianFactor(2, -10*eye(2), 0, 10*eye(2), (Vector(2) << 2.0, -1.0), unit2); + fg += JacobianFactor(0, 10*eye(2), 2, -10*eye(2), (Vector(2) << 2.0, -1.0), unit2); // measurement between x1 and l1: l1-x1=[0.0;0.2] - fg += JacobianFactor(2, -5*eye(2), 1, 5*eye(2), (Vector(2) << 0.0, 1.0), unit2); + fg += JacobianFactor(1, 5*eye(2), 2, -5*eye(2), (Vector(2) << 0.0, 1.0), unit2); // measurement between x2 and l1: l1-x2=[-0.2;0.3] fg += JacobianFactor(0, -5*eye(2), 1, 5*eye(2), (Vector(2) << -1.0, 1.5), unit2); return fg; @@ -298,6 +313,31 @@ TEST( GaussianFactorGraph, multiplyHessianAdd2 ) EXPECT(assert_equal(2*expected, actual)); } +/* ************************************************************************* */ +TEST( GaussianFactorGraph, multiplyHessianAdd3 ) +{ + GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor(); + + // brute force + Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian(); + Vector X(6); X<<1,2,3,4,5,6; + Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450; + EXPECT(assert_equal(Y,AtA*X)); + + double* x = &X[0]; + double* y = &Y[0]; + + Vector fast_y = gtsam::zero(6); + double* actual = &fast_y[0]; + gfg.multiplyHessianAdd(1.0, x, fast_y.data()); + EXPECT(assert_equal(Y, fast_y)); + + // now, do it with non-zero y + gfg.multiplyHessianAdd(1.0, x, fast_y.data()); + EXPECT(assert_equal(2*Y, fast_y)); + +} + /* ************************************************************************* */ TEST( GaussianFactorGraph, matricesMixed ) diff --git a/gtsam/linear/tests/testHessianFactorUnordered.cpp b/gtsam/linear/tests/testHessianFactorUnordered.cpp index 383604c76..9cd18c974 100644 --- a/gtsam/linear/tests/testHessianFactorUnordered.cpp +++ b/gtsam/linear/tests/testHessianFactorUnordered.cpp @@ -430,11 +430,11 @@ TEST(HessianFactor, combine) { /* ************************************************************************* */ TEST(HessianFactor, gradientAtZero) { - Matrix G11 = (Matrix(1, 1) << 1.0); - Matrix G12 = (Matrix(1, 2) << 0.0, 0.0); - Matrix G22 = (Matrix(2, 2) << 1.0, 0.0, 0.0, 1.0); - Vector g1 = (Vector(1) << -7.0); - Vector g2 = (Vector(2) << -8.0, -9.0); + Matrix G11 = (Matrix(1, 1) << 1); + Matrix G12 = (Matrix(1, 2) << 0, 0); + Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1); + Vector g1 = (Vector(1) << -7); + Vector g2 = (Vector(2) << -8, -9); double f = 194; HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f); @@ -448,6 +448,31 @@ TEST(HessianFactor, gradientAtZero) EXPECT(assert_equal(expectedG, actualG)); } +/* ************************************************************************* */ +TEST(HessianFactor, hessianDiagonal) +{ + Matrix G11 = (Matrix(1, 1) << 1); + Matrix G12 = (Matrix(1, 2) << 0, 0); + Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1); + Vector g1 = (Vector(1) << -7); + Vector g2 = (Vector(2) << -8, -9); + double f = 194; + + HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f); + + // hessianDiagonal + VectorValues expected; + expected.insert(0, 
(Vector(1) << 1)); + expected.insert(1, (Vector(2) << 1,1)); + EXPECT(assert_equal(expected, factor.hessianDiagonal())); + + // hessianBlockDiagonal + map actualBD = factor.hessianBlockDiagonal(); + LONGS_EQUAL(2,actualBD.size()); + EXPECT(assert_equal(G11,actualBD[0])); + EXPECT(assert_equal(G22,actualBD[1])); + } + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr);} /* ************************************************************************* */ diff --git a/gtsam/linear/tests/testJacobianFactorUnordered.cpp b/gtsam/linear/tests/testJacobianFactorUnordered.cpp index 72afa3ae9..1fc7365e7 100644 --- a/gtsam/linear/tests/testJacobianFactorUnordered.cpp +++ b/gtsam/linear/tests/testJacobianFactorUnordered.cpp @@ -131,19 +131,16 @@ TEST(JabobianFactor, Hessian_conversion) { 1.57, 2.695, -1.1, -2.35, 2.695, 11.3125, -0.65, -10.225, -1.1, -0.65, 1, 0.5, - -2.35, -10.225, 0.5, 9.25).finished(), - (Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(), + -2.35, -10.225, 0.5, 9.25), + (Vector(4) << -7.885, -28.5175, 2.75, 25.675), 73.1725); JacobianFactor expected(0, (Matrix(2,4) << 1.2530, 2.1508, -0.8779, -1.8755, - 0, 2.5858, 0.4789, -2.3943).finished(), - (Vector(2) << -6.2929, -5.7941).finished(), - noiseModel::Unit::Create(2)); + 0, 2.5858, 0.4789, -2.3943), + (Vector(2) << -6.2929, -5.7941)); - JacobianFactor actual(hessian); - - EXPECT(assert_equal(expected, actual, 1e-3)); + EXPECT(assert_equal(expected, JacobianFactor(hessian), 1e-3)); } /* ************************************************************************* */ @@ -204,9 +201,54 @@ TEST(JacobianFactor, error) DOUBLES_EQUAL(expected_error, actual_error, 1e-10); } +/* ************************************************************************* */ +TEST(JacobianFactor, matrices_NULL) +{ + // Make sure everything works with NULL noise model + JacobianFactor factor(simple::terms, simple::b); + + Matrix jacobianExpected(3, 9); + jacobianExpected << simple::terms[0].second, simple::terms[1].second, simple::terms[2].second; + Vector rhsExpected = simple::b; + Matrix augmentedJacobianExpected(3, 10); + augmentedJacobianExpected << jacobianExpected, rhsExpected; + + Matrix augmentedHessianExpected = + augmentedJacobianExpected.transpose() * augmentedJacobianExpected; + + // Hessian + EXPECT(assert_equal(Matrix(augmentedHessianExpected.topLeftCorner(9,9)), factor.information())); + EXPECT(assert_equal(augmentedHessianExpected, factor.augmentedInformation())); + + // Whitened Jacobian + EXPECT(assert_equal(jacobianExpected, factor.jacobian().first)); + EXPECT(assert_equal(rhsExpected, factor.jacobian().second)); + EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobian())); + + // Unwhitened Jacobian + EXPECT(assert_equal(jacobianExpected, factor.jacobianUnweighted().first)); + EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second)); + EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted())); + + // hessianDiagonal + VectorValues expectDiagonal; + expectDiagonal.insert(5, ones(3)); + expectDiagonal.insert(10, 4*ones(3)); + expectDiagonal.insert(15, 9*ones(3)); + EXPECT(assert_equal(expectDiagonal, factor.hessianDiagonal())); + + // hessianBlockDiagonal + map actualBD = factor.hessianBlockDiagonal(); + LONGS_EQUAL(3,actualBD.size()); + EXPECT(assert_equal(1*eye(3),actualBD[5])); + EXPECT(assert_equal(4*eye(3),actualBD[10])); + EXPECT(assert_equal(9*eye(3),actualBD[15])); +} + /* 
************************************************************************* */ TEST(JacobianFactor, matrices) { + // And now witgh a non-unit noise model JacobianFactor factor(simple::terms, simple::b, simple::noise); Matrix jacobianExpected(3, 9); @@ -232,6 +274,21 @@ TEST(JacobianFactor, matrices) EXPECT(assert_equal(jacobianExpected, factor.jacobianUnweighted().first)); EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second)); EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted())); + + // hessianDiagonal + VectorValues expectDiagonal; + // below we divide by the variance 0.5^2 + expectDiagonal.insert(5, (Vector(3) << 1, 1, 1)/0.25); + expectDiagonal.insert(10, (Vector(3) << 4, 4, 4)/0.25); + expectDiagonal.insert(15, (Vector(3) << 9, 9, 9)/0.25); + EXPECT(assert_equal(expectDiagonal, factor.hessianDiagonal())); + + // hessianBlockDiagonal + map actualBD = factor.hessianBlockDiagonal(); + LONGS_EQUAL(3,actualBD.size()); + EXPECT(assert_equal(4*eye(3),actualBD[5])); + EXPECT(assert_equal(16*eye(3),actualBD[10])); + EXPECT(assert_equal(36*eye(3),actualBD[15])); } /* ************************************************************************* */ diff --git a/gtsam/navigation/.gitignore b/gtsam/navigation/.gitignore new file mode 100644 index 000000000..798d17f47 --- /dev/null +++ b/gtsam/navigation/.gitignore @@ -0,0 +1 @@ +/*.*~ diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 72d31b294..559568c84 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -48,39 +48,6 @@ namespace gtsam { /** Struct to store results of preintegrating IMU measurements. Can be build * incrementally so as to avoid costly integration at time of factor construction. */ - /** Right Jacobian for Exponential map in SO(3) - equation (10.86) and following equations in [1] */ - static Matrix3 rightJacobianExpMapSO3(const Vector3& x) { - // x is the axis-angle representation (exponential coordinates) for a rotation - double normx = norm_2(x); // rotation angle - Matrix3 Jr; - if (normx < 10e-8){ - Jr = Matrix3::Identity(); - } - else{ - const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^ - Jr = Matrix3::Identity() - ((1-cos(normx))/(normx*normx)) * X + - ((normx-sin(normx))/(normx*normx*normx)) * X * X; // right Jacobian - } - return Jr; - } - - /** Right Jacobian for Log map in SO(3) - equation (10.86) and following equations in [1] */ - static Matrix3 rightJacobianExpMapSO3inverse(const Vector3& x) { - // x is the axis-angle representation (exponential coordinates) for a rotation - double normx = norm_2(x); // rotation angle - Matrix3 Jrinv; - - if (normx < 10e-8){ - Jrinv = Matrix3::Identity(); - } - else{ - const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^ - Jrinv = Matrix3::Identity() + - 0.5 * X + (1/(normx*normx) - (1+cos(normx))/(2*normx * sin(normx)) ) * X * X; - } - return Jrinv; - } - /** CombinedPreintegratedMeasurements accumulates (integrates) the IMU measurements (rotation rates and accelerations) * and the corresponding covariance matrix. 
The measurements are then used to build the Preintegrated IMU factor*/ class CombinedPreintegratedMeasurements { @@ -187,7 +154,7 @@ namespace gtsam { const Vector3 theta_incr = correctedOmega * deltaT; // rotation vector describing rotation increment computed from the current rotation rate measurement const Rot3 Rincr = Rot3::Expmap(theta_incr); // rotation increment computed from the current rotation rate measurement - const Matrix3 Jr_theta_incr = rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr + const Matrix3 Jr_theta_incr = Rot3::rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr // Update Jacobians /* ----------------------------------------------------------------------------------------------------------------------- */ @@ -208,11 +175,11 @@ namespace gtsam { Matrix3 Z_3x3 = Matrix3::Zero(); Matrix3 I_3x3 = Matrix3::Identity(); const Vector3 theta_i = Rot3::Logmap(deltaRij); // parametrization of so(3) - const Matrix3 Jr_theta_i = rightJacobianExpMapSO3(theta_i); + const Matrix3 Jr_theta_i = Rot3::rightJacobianExpMapSO3(theta_i); Rot3 Rot_j = deltaRij * Rincr; const Vector3 theta_j = Rot3::Logmap(Rot_j); // parametrization of so(3) - const Matrix3 Jrinv_theta_j = rightJacobianExpMapSO3inverse(theta_j); + const Matrix3 Jrinv_theta_j = Rot3::rightJacobianExpMapSO3inverse(theta_j); // Single Jacobians to propagate covariance Matrix3 H_pos_pos = I_3x3; @@ -436,11 +403,11 @@ namespace gtsam { const Rot3 fRhat = deltaRij_biascorrected_corioliscorrected.between(Rot_i.between(Rot_j)); - const Matrix3 Jr_theta_bcc = rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected); + const Matrix3 Jr_theta_bcc = Rot3::rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected); const Matrix3 Jtheta = -Jr_theta_bcc * skewSymmetric(Rot_i.inverse().matrix() * omegaCoriolis_ * deltaTij); - const Matrix3 Jrinv_fRhat = rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat)); + const Matrix3 Jrinv_fRhat = Rot3::rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat)); if(H1) { H1->resize(15,6); @@ -515,8 +482,8 @@ namespace gtsam { } if(H5) { - const Matrix3 Jrinv_theta_bc = rightJacobianExpMapSO3inverse(theta_biascorrected); - const Matrix3 Jr_JbiasOmegaIncr = rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr); + const Matrix3 Jrinv_theta_bc = Rot3::rightJacobianExpMapSO3inverse(theta_biascorrected); + const Matrix3 Jr_JbiasOmegaIncr = Rot3::rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr); const Matrix3 JbiasOmega = Jr_theta_bcc * Jrinv_theta_bc * Jr_JbiasOmegaIncr * preintegratedMeasurements_.delRdelBiasOmega; H5->resize(15,6); diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index 0ec4c56e2..b1e20297e 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -47,39 +47,6 @@ namespace gtsam { /** Struct to store results of preintegrating IMU measurements. Can be build * incrementally so as to avoid costly integration at time of factor construction. 
*/ - /** Right Jacobian for Exponential map in SO(3) - equation (10.86) and following equations in [1] */ - static Matrix3 rightJacobianExpMapSO3(const Vector3& x) { - // x is the axis-angle representation (exponential coordinates) for a rotation - double normx = norm_2(x); // rotation angle - Matrix3 Jr; - if (normx < 10e-8){ - Jr = Matrix3::Identity(); - } - else{ - const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^ - Jr = Matrix3::Identity() - ((1-cos(normx))/(normx*normx)) * X + - ((normx-sin(normx))/(normx*normx*normx)) * X * X; // right Jacobian - } - return Jr; - } - - /** Right Jacobian for Log map in SO(3) - equation (10.86) and following equations in [1] */ - static Matrix3 rightJacobianExpMapSO3inverse(const Vector3& x) { - // x is the axis-angle representation (exponential coordinates) for a rotation - double normx = norm_2(x); // rotation angle - Matrix3 Jrinv; - - if (normx < 10e-8){ - Jrinv = Matrix3::Identity(); - } - else{ - const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^ - Jrinv = Matrix3::Identity() + - 0.5 * X + (1/(normx*normx) - (1+cos(normx))/(2*normx * sin(normx)) ) * X * X; - } - return Jrinv; - } - /** CombinedPreintegratedMeasurements accumulates (integrates) the IMU measurements (rotation rates and accelerations) * and the corresponding covariance matrix. The measurements are then used to build the Preintegrated IMU factor*/ class PreintegratedMeasurements { @@ -182,7 +149,7 @@ namespace gtsam { const Vector3 theta_incr = correctedOmega * deltaT; // rotation vector describing rotation increment computed from the current rotation rate measurement const Rot3 Rincr = Rot3::Expmap(theta_incr); // rotation increment computed from the current rotation rate measurement - const Matrix3 Jr_theta_incr = rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr + const Matrix3 Jr_theta_incr = Rot3::rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr // Update Jacobians /* ----------------------------------------------------------------------------------------------------------------------- */ @@ -200,11 +167,11 @@ namespace gtsam { Matrix3 Z_3x3 = Matrix3::Zero(); Matrix3 I_3x3 = Matrix3::Identity(); const Vector3 theta_i = Rot3::Logmap(deltaRij); // parametrization of so(3) - const Matrix3 Jr_theta_i = rightJacobianExpMapSO3(theta_i); + const Matrix3 Jr_theta_i = Rot3::rightJacobianExpMapSO3(theta_i); Rot3 Rot_j = deltaRij * Rincr; const Vector3 theta_j = Rot3::Logmap(Rot_j); // parametrization of so(3) - const Matrix3 Jrinv_theta_j = rightJacobianExpMapSO3inverse(theta_j); + const Matrix3 Jrinv_theta_j = Rot3::rightJacobianExpMapSO3inverse(theta_j); // Update preintegrated measurements covariance: as in [2] we consider a first order propagation that // can be seen as a prediction phase in an EKF framework @@ -397,11 +364,11 @@ namespace gtsam { const Rot3 fRhat = deltaRij_biascorrected_corioliscorrected.between(Rot_i.between(Rot_j)); - const Matrix3 Jr_theta_bcc = rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected); + const Matrix3 Jr_theta_bcc = Rot3::rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected); const Matrix3 Jtheta = -Jr_theta_bcc * skewSymmetric(Rot_i.inverse().matrix() * omegaCoriolis_ * deltaTij); - const Matrix3 Jrinv_fRhat = rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat)); + const Matrix3 Jrinv_fRhat = Rot3::rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat)); if(H1) { H1->resize(9,6); @@ -457,8 +424,8 @@ namespace gtsam { } if(H5) { - 
const Matrix3 Jrinv_theta_bc = rightJacobianExpMapSO3inverse(theta_biascorrected); - const Matrix3 Jr_JbiasOmegaIncr = rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr); + const Matrix3 Jrinv_theta_bc = Rot3::rightJacobianExpMapSO3inverse(theta_biascorrected); + const Matrix3 Jr_JbiasOmegaIncr = Rot3::rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr); const Matrix3 JbiasOmega = Jr_theta_bcc * Jrinv_theta_bc * Jr_JbiasOmegaIncr * preintegratedMeasurements_.delRdelBiasOmega; H5->resize(9,6); diff --git a/gtsam/navigation/tests/testImuFactor.cpp b/gtsam/navigation/tests/testImuFactor.cpp index cc88505bd..6cd286f68 100644 --- a/gtsam/navigation/tests/testImuFactor.cpp +++ b/gtsam/navigation/tests/testImuFactor.cpp @@ -319,7 +319,7 @@ TEST( ImuFactor, PartialDerivativeExpmap ) Matrix expectedDelRdelBiasOmega = numericalDerivative11(boost::bind( &evaluateRotation, measuredOmega, _1, deltaT), LieVector(biasOmega)); - const Matrix3 Jr = ImuFactor::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT); + const Matrix3 Jr = Rot3::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT); Matrix3 actualdelRdelBiasOmega = - Jr * deltaT; // the delta bias appears with the minus sign @@ -371,7 +371,7 @@ TEST( ImuFactor, fistOrderExponential ) Vector3 deltabiasOmega; deltabiasOmega << alpha,alpha,alpha; - const Matrix3 Jr = ImuFactor::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT); + const Matrix3 Jr = Rot3::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT); Matrix3 delRdelBiasOmega = - Jr * deltaT; // the delta bias appears with the minus sign diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 4493a0b16..94190e687 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -13,6 +13,7 @@ * @file LevenbergMarquardtOptimizer.cpp * @brief * @author Richard Roberts + * @author Luca Carlone * @date Feb 26, 2012 */ @@ -20,16 +21,21 @@ #include #include #include +#include #include +#include #include #include #include using namespace std; + namespace gtsam { +using boost::adaptors::map_values; + /* ************************************************************************* */ LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTranslator(const std::string &src) const { std::string s = src; boost::algorithm::to_upper(s); @@ -48,13 +54,14 @@ LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTrans std::string LevenbergMarquardtParams::verbosityLMTranslator(VerbosityLM value) const { std::string s; switch (value) { - case LevenbergMarquardtParams::SILENT: s = "SILENT" ; break; - case LevenbergMarquardtParams::LAMBDA: s = "LAMBDA" ; break; - case LevenbergMarquardtParams::TRYLAMBDA: s = "TRYLAMBDA" ; break; - case LevenbergMarquardtParams::TRYCONFIG: s = "TRYCONFIG" ; break; - case LevenbergMarquardtParams::TRYDELTA: s = "TRYDELTA" ; break; - case LevenbergMarquardtParams::DAMPED: s = "DAMPED" ; break; - default: s = "UNDEFINED" ; break; + case LevenbergMarquardtParams::SILENT: s = "SILENT" ; break; + case LevenbergMarquardtParams::TERMINATION: s = "TERMINATION" ; break; + case LevenbergMarquardtParams::LAMBDA: s = "LAMBDA" ; break; + case LevenbergMarquardtParams::TRYLAMBDA: s = "TRYLAMBDA" ; break; + case LevenbergMarquardtParams::TRYCONFIG: s = "TRYCONFIG" ; break; + case LevenbergMarquardtParams::TRYDELTA: s = "TRYDELTA" ; break; + case 
LevenbergMarquardtParams::DAMPED: s = "DAMPED" ; break; + default: s = "UNDEFINED" ; break; } return s; } @@ -65,6 +72,10 @@ void LevenbergMarquardtParams::print(const std::string& str) const { std::cout << " lambdaInitial: " << lambdaInitial << "\n"; std::cout << " lambdaFactor: " << lambdaFactor << "\n"; std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n"; + std::cout << " lambdaLowerBound: " << lambdaLowerBound << "\n"; + std::cout << " disableInnerIterations: " << disableInnerIterations << "\n"; + std::cout << " minModelFidelity: " << minModelFidelity << "\n"; + std::cout << " diagonalDamping: " << diagonalDamping << "\n"; std::cout << " verbosityLM: " << verbosityLMTranslator(verbosityLM) << "\n"; std::cout.flush(); } @@ -76,12 +87,75 @@ GaussianFactorGraph::shared_ptr LevenbergMarquardtOptimizer::linearize() const { /* ************************************************************************* */ void LevenbergMarquardtOptimizer::increaseLambda(){ - state_.lambda *= params_.lambdaFactor; + if(params_.useFixedLambdaFactor_){ + state_.lambda *= params_.lambdaFactor; + }else{ + state_.lambda *= params_.lambdaFactor; + params_.lambdaFactor *= 2.0; + } + params_.reuse_diagonal_ = true; } /* ************************************************************************* */ -void LevenbergMarquardtOptimizer::decreaseLambda(){ - state_.lambda /= params_.lambdaFactor; +void LevenbergMarquardtOptimizer::decreaseLambda(double stepQuality){ + + if(params_.useFixedLambdaFactor_){ + state_.lambda /= params_.lambdaFactor; + }else{ + // CHECK_GT(step_quality, 0.0); + state_.lambda *= std::max(1.0 / 3.0, 1.0 - pow(2.0 * stepQuality - 1.0, 3)); + params_.lambdaFactor = 2.0; + } + state_.lambda = std::max(params_.lambdaLowerBound, state_.lambda); + params_.reuse_diagonal_ = false; + +} + +/* ************************************************************************* */ +GaussianFactorGraph LevenbergMarquardtOptimizer::buildDampedSystem( + const GaussianFactorGraph& linear) { + + //Set two parameters as Ceres, will move out later + static const double min_diagonal_ = 1e-6; + static const double max_diagonal_ = 1e32; + + gttic(damp); + if (params_.verbosityLM >= LevenbergMarquardtParams::DAMPED) + cout << "building damped system with lambda " << state_.lambda << endl; + + // Only retrieve diagonal vector when reuse_diagonal = false + if (params_.diagonalDamping && params_.reuse_diagonal_ == false) { + state_.hessianDiagonal = linear.hessianDiagonal(); + BOOST_FOREACH(Vector& v, state_.hessianDiagonal | map_values) { + for (int aa = 0; aa < v.size(); aa++) { + v(aa) = std::min(std::max(v(aa), min_diagonal_), max_diagonal_); + v(aa) = sqrt(v(aa)); + } + } + } // reuse diagonal + + // for each of the variables, add a prior + double sigma = 1.0 / std::sqrt(state_.lambda); + GaussianFactorGraph damped = linear; + damped.reserve(damped.size() + state_.values.size()); + BOOST_FOREACH(const Values::KeyValuePair& key_value, state_.values) { + size_t dim = key_value.value.dim(); + Matrix A = Matrix::Identity(dim, dim); + //Replace the identity matrix with diagonal of Hessian + if (params_.diagonalDamping){ + try { + A.diagonal() = state_.hessianDiagonal.at(key_value.key); + } catch (std::exception e) { + // Don't attempt any damping if no key found in diagonal + continue; + } + } + Vector b = Vector::Zero(dim); + SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma); + damped += boost::make_shared(key_value.key, A, b, model); + } + gttoc(damp); + return damped; } /* 
************************************************************************* */ @@ -94,116 +168,115 @@ void LevenbergMarquardtOptimizer::iterate() { const LevenbergMarquardtParams::VerbosityLM lmVerbosity = params_.verbosityLM; // Linearize graph - if(nloVerbosity >= NonlinearOptimizerParams::ERROR) - cout << "linearizing = " << endl; + if(lmVerbosity >= LevenbergMarquardtParams::DAMPED) cout << "linearizing = " << endl; GaussianFactorGraph::shared_ptr linear = linearize(); // Keep increasing lambda until we make make progress while (true) { - ++state_.totalNumberInnerIterations; - // Add prior-factors - // TODO: replace this dampening with a backsubstitution approach - gttic(damp); - if (lmVerbosity >= LevenbergMarquardtParams::DAMPED) cout << "building damped system" << endl; - GaussianFactorGraph dampedSystem = *linear; - { - double sigma = 1.0 / std::sqrt(state_.lambda); - dampedSystem.reserve(dampedSystem.size() + state_.values.size()); - // for each of the variables, add a prior - BOOST_FOREACH(const Values::KeyValuePair& key_value, state_.values) { - size_t dim = key_value.value.dim(); - Matrix A = Matrix::Identity(dim, dim); - Vector b = Vector::Zero(dim); - SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma); - dampedSystem += boost::make_shared(key_value.key, A, b, model); - } + + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "trying lambda = " << state_.lambda << endl; + + // Build damped system for this lambda (adds prior factors that make it like gradient descent) + GaussianFactorGraph dampedSystem = buildDampedSystem(*linear); + + // Log current error/lambda to file + if (!params_.logFile.empty()) { + ofstream os(params_.logFile.c_str(), ios::app); + + boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time(); + + os << state_.totalNumberInnerIterations << "," << 1e-6 * (currentTime - state_.startTime).total_microseconds() << "," + << state_.error << "," << state_.lambda << endl; } - gttoc(damp); + + ++state_.totalNumberInnerIterations; // Try solving + double modelFidelity = 0.0; + bool step_is_successful = false; + bool stopSearchingLambda = false; + double newError; + Values newValues; + VectorValues delta; + + bool systemSolvedSuccessfully; try { - if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) - cout << "trying lambda = " << state_.lambda << endl; - // Log current error/lambda to file - if (!params_.logFile.empty()) { - ofstream os(params_.logFile.c_str(), ios::app); + delta = solve(dampedSystem, state_.values, params_); + systemSolvedSuccessfully = true; + } catch(IndeterminantLinearSystemException& e) { + systemSolvedSuccessfully = false; + } - boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time(); - - os << state_.iterations << "," << 1e-6 * (currentTime - state_.startTime).total_microseconds() << "," - << state_.error << "," << state_.lambda << endl; - } - - // Solve Damped Gaussian Factor Graph - const VectorValues delta = solve(dampedSystem, state_.values, params_); + if(systemSolvedSuccessfully) { + params_.reuse_diagonal_ = true; if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.norm() << endl; if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta.print("delta"); - // update values - gttic (retract); - Values newValues = state_.values.retract(delta); - gttoc(retract); + // cost change in the linearized system (old - new) + double newlinearizedError = linear->error(delta); - // create new optimization state 
with more adventurous lambda - gttic (compute_error); - if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "calculating error" << endl; - double error = graph_.error(newValues); - gttoc(compute_error); + double linearizedCostChange = state_.error - newlinearizedError; - if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl; + if(linearizedCostChange >= 0){ // step is valid + // update values + gttic (retract); + newValues = state_.values.retract(delta); + gttoc(retract); - if (error <= state_.error) { - state_.values.swap(newValues); - state_.error = error; - decreaseLambda(); - break; - } else { - // Either we're not cautious, or the same lambda was worse than the current error. - // The more adventurous lambda was worse too, so make lambda more conservative - // and keep the same values. - if(state_.lambda >= params_.lambdaUpperBound) { - if(nloVerbosity >= NonlinearOptimizerParams::ERROR) - cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; - break; + // compute new error + gttic (compute_error); + if(lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "calculating error" << endl; + newError = graph_.error(newValues); + gttoc(compute_error); + + // cost change in the original, nonlinear system (old - new) + double costChange = state_.error - newError; + + double absolute_function_tolerance = params_.relativeErrorTol * state_.error; + if (fabs(costChange) >= absolute_function_tolerance) { + // fidelity of linearized model VS original system between + if(linearizedCostChange > 1e-15){ + modelFidelity = costChange / linearizedCostChange; + // if we decrease the error in the nonlinear system and modelFidelity is above threshold + step_is_successful = modelFidelity > params_.minModelFidelity; + }else{ + step_is_successful = true; // linearizedCostChange close to zero + } } else { - if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) - cout << "increasing lambda: old error (" << state_.error << ") new error (" << error << ")" << endl; - - increaseLambda(); + stopSearchingLambda = true; } } - } catch (IndeterminantLinearSystemException& e) { - (void) e; // Prevent unused variable warning - if(lmVerbosity >= LevenbergMarquardtParams::LAMBDA) - cout << "Negative matrix, increasing lambda" << endl; - // Either we're not cautious, or the same lambda was worse than the current error. - // The more adventurous lambda was worse too, so make lambda more conservative - // and keep the same values. + } + + if(step_is_successful){ // we have successfully decreased the cost and we have good modelFidelity + state_.values.swap(newValues); + state_.error = newError; + decreaseLambda(modelFidelity); + break; + }else if (!stopSearchingLambda){ // we failed to solved the system or we had no decrease in cost + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) + cout << "increasing lambda: old error (" << state_.error << ") new error (" << newError << ")" << endl; + increaseLambda(); + + // check if lambda is too big if(state_.lambda >= params_.lambdaUpperBound) { - if(nloVerbosity >= NonlinearOptimizerParams::ERROR) + if(nloVerbosity >= NonlinearOptimizerParams::TERMINATION) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; - } else { - increaseLambda(); } + } else { // the change in the cost is very small and it is not worth trying bigger lambdas + break; } -// Frank asks: why would we do that? -// catch(...) 
{ -// throw; -// } } // end while - if (lmVerbosity >= LevenbergMarquardtParams::LAMBDA) - cout << "using lambda = " << state_.lambda << endl; - // Increment the iteration counter ++state_.iterations; } /* ************************************************************************* */ LevenbergMarquardtParams LevenbergMarquardtOptimizer::ensureHasOrdering( - LevenbergMarquardtParams params, const NonlinearFactorGraph& graph) const + LevenbergMarquardtParams params, const NonlinearFactorGraph& graph) const { if(!params.ordering) params.ordering = Ordering::COLAMD(graph); diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 15e14ab47..bae82b7c3 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -21,6 +21,8 @@ #include #include +class NonlinearOptimizerMoreOptimizationTest; + namespace gtsam { class LevenbergMarquardtOptimizer; @@ -35,7 +37,7 @@ class GTSAM_EXPORT LevenbergMarquardtParams: public NonlinearOptimizerParams { public: /** See LevenbergMarquardtParams::lmVerbosity */ enum VerbosityLM { - SILENT = 0, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA + SILENT = 0, TERMINATION, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA }; private: @@ -47,12 +49,19 @@ public: double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5) double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) + double lambdaLowerBound; ///< The minimum lambda used in LM (default: 0) VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity + bool disableInnerIterations; ///< If enabled inner iterations on the linearized system are performed + double minModelFidelity; ///< Lower bound for the modelFidelity to accept the result of an LM iteration std::string logFile; ///< an optional CSV log file, with [iteration, time, error, labda] + bool diagonalDamping; ///< if true, use diagonal of Hessian + bool reuse_diagonal_; //an additional option in Ceres for diagonalDamping (related to efficiency) + bool useFixedLambdaFactor_; // if true applies constant increase (or decrease) to lambda according to lambdaFactor LevenbergMarquardtParams() : - lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), verbosityLM( - SILENT) { + lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lambdaLowerBound( + 0.0), verbosityLM(SILENT), disableInnerIterations(false), minModelFidelity( + 1e-3), diagonalDamping(false), reuse_diagonal_(false), useFixedLambdaFactor_(true) { } virtual ~LevenbergMarquardtParams() { } @@ -68,12 +77,18 @@ public: inline double getlambdaUpperBound() const { return lambdaUpperBound; } + inline double getlambdaLowerBound() const { + return lambdaLowerBound; + } inline std::string getVerbosityLM() const { return verbosityLMTranslator(verbosityLM); } inline std::string getLogFile() const { return logFile; } + inline bool getDiagonalDamping() const { + return diagonalDamping; + } inline void setlambdaInitial(double value) { lambdaInitial = value; @@ -84,12 +99,22 @@ public: inline void setlambdaUpperBound(double value) { lambdaUpperBound = value; } + inline void setlambdaLowerBound(double value) { + lambdaLowerBound = value; + } inline void setVerbosityLM(const std::string &s) { verbosityLM = 
verbosityLMTranslator(s); } inline void setLogFile(const std::string &s) { logFile = s; } + inline void setDiagonalDamping(bool flag) { + diagonalDamping = flag; + } + + inline void setUseFixedLambdaFactor(bool flag) { + useFixedLambdaFactor_ = flag; + } }; /** @@ -101,6 +126,7 @@ public: double lambda; int totalNumberInnerIterations; // The total number of inner iterations in the optimization (for each iteration, LM may try multiple iterations with different lambdas) boost::posix_time::ptime startTime; + VectorValues hessianDiagonal; //only update hessianDiagonal when reuse_diagonal_ = false LevenbergMarquardtState() { initTime(); @@ -174,11 +200,11 @@ public: return state_.lambda; } - // Apply policy to increase lambda if the current update was successful - virtual void increaseLambda(); + // Apply policy to increase lambda if the current update was successful (stepQuality not used in the naive policy) + void increaseLambda(); - // Apply policy to decrease lambda if the current update was NOT successful - virtual void decreaseLambda(); + // Apply policy to decrease lambda if the current update was NOT successful (stepQuality not used in the naive policy) + void decreaseLambda(double stepQuality); /// Access the current number of inner iterations int getInnerIterations() const { @@ -226,9 +252,14 @@ public: return state_; } + /** Build a damped system for a specific lambda */ + GaussianFactorGraph buildDampedSystem(const GaussianFactorGraph& linear); + friend class ::NonlinearOptimizerMoreOptimizationTest; + /// @} protected: + /** Access the parameters (base class version) */ virtual const NonlinearOptimizerParams& _params() const { return params_; diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 52c1e1f08..54e5cb9e3 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -55,8 +55,12 @@ void NonlinearOptimizer::defaultOptimize() { if (params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << currentError << endl; // Return if we already have too many iterations - if(this->iterations() >= params.maxIterations) + if(this->iterations() >= params.maxIterations){ + if (params.verbosity >= NonlinearOptimizerParams::TERMINATION) { + cout << "iterations: " << this->iterations() << " >? 
" << params.maxIterations << endl; + } return; + } // Iterative loop do { @@ -72,7 +76,7 @@ void NonlinearOptimizer::defaultOptimize() { params.errorTol, currentError, this->error(), params.verbosity)); // Printing if verbose - if (params.verbosity >= NonlinearOptimizerParams::ERROR && + if (params.verbosity >= NonlinearOptimizerParams::TERMINATION && this->iterations() >= params.maxIterations) cout << "Terminating because reached maximum iterations" << endl; } @@ -152,11 +156,16 @@ bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold } bool converged = (relativeErrorTreshold && (relativeDecrease <= relativeErrorTreshold)) || (absoluteDecrease <= absoluteErrorTreshold); - if (verbosity >= NonlinearOptimizerParams::ERROR && converged) { + if (verbosity >= NonlinearOptimizerParams::TERMINATION && converged) { + if(absoluteDecrease >= 0.0) cout << "converged" << endl; else cout << "Warning: stopping nonlinear iterations because error increased" << endl; + + cout << "errorThreshold: " << newError << " key2()) << " behind Camera " << DefaultKeyFormatter(this->key1()) << std::endl; + return zero(2); } - return zero(2); } /** return the measured */ diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index db91fbd45..6548b1449 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -685,7 +685,7 @@ bool writeBALfromValues(const string& filename, SfM_data &data, Values& values){ Values valuesCameras = values.filter< PinholeCamera >(); if ( valuesCameras.size() == data.number_cameras() ){ // we only estimated camera poses and calibration for (size_t i = 0; i < data.number_cameras(); i++){ // for each camera - Key cameraKey = symbol('c',i); + Key cameraKey = i; // symbol('c',i); PinholeCamera camera = values.at >(cameraKey); data.cameras[i] = camera; } @@ -720,4 +720,22 @@ bool writeBALfromValues(const string& filename, SfM_data &data, Values& values){ return writeBAL(filename, data); } +Values initialCamerasEstimate(const SfM_data& db) { + Values initial; + size_t i = 0; // NO POINTS: j = 0; + BOOST_FOREACH(const SfM_Camera& camera, db.cameras) + initial.insert(i++, camera); + return initial; +} + +Values initialCamerasAndPointsEstimate(const SfM_data& db) { + Values initial; + size_t i = 0, j = 0; + BOOST_FOREACH(const SfM_Camera& camera, db.cameras) + initial.insert((i++), camera); + BOOST_FOREACH(const SfM_Track& track, db.tracks) + initial.insert(P(j++), track.p); + return initial; +} + } // \namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 0ee5aad9f..070bfc000 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -175,4 +175,19 @@ GTSAM_EXPORT Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz); */ GTSAM_EXPORT Pose3 gtsam2openGL(const Pose3& PoseGTSAM); +/** + * @brief This function creates initial values for cameras from db + * @param SfM_data + * @return Values + */ +GTSAM_EXPORT Values initialCamerasEstimate(const SfM_data& db); + +/** + * @brief This function creates initial values for cameras and points from db + * @param SfM_data + * @return Values + */ +GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfM_data& db); + + } // namespace gtsam diff --git a/gtsam_unstable/geometry/TriangulationFactor.h b/gtsam_unstable/geometry/TriangulationFactor.h new file mode 100644 index 000000000..24b7918e3 --- /dev/null +++ b/gtsam_unstable/geometry/TriangulationFactor.h @@ -0,0 +1,197 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 
2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * testTriangulationFactor.h + * @date March 2, 2014 + * @author Frank Dellaert + */ + +#include +#include +#include +#include +#include + +namespace gtsam { + +/** + * Non-linear factor for a constraint derived from a 2D measurement. + * The calibration and pose are assumed known. + * @addtogroup SLAM + */ +template +class TriangulationFactor: public NoiseModelFactor1 { + +public: + + /// Camera type + typedef PinholeCamera Camera; + +protected: + + // Keep a copy of measurement and calibration for I/O + const Camera camera_; ///< Camera in which this landmark was seen + const Point2 measured_; ///< 2D measurement + + // verbosity handling for Cheirality Exceptions + const bool throwCheirality_; ///< If true, rethrows Cheirality exceptions (default: false) + const bool verboseCheirality_; ///< If true, prints text for Cheirality exceptions (default: false) + +public: + + /// shorthand for base class type + typedef NoiseModelFactor1 Base; + + /// shorthand for this class + typedef TriangulationFactor This; + + /// shorthand for a smart pointer to a factor + typedef boost::shared_ptr shared_ptr; + + /// Default constructor + TriangulationFactor() : + throwCheirality_(false), verboseCheirality_(false) { + } + + /** + * Constructor with exception-handling flags + * @param camera is the camera in which unknown landmark is seen + * @param measured is the 2 dimensional location of point in image (the measurement) + * @param model is the standard deviation + * @param pointKey is the index of the landmark + * @param throwCheirality determines whether Cheirality exceptions are rethrown + * @param verboseCheirality determines whether exceptions are printed for Cheirality + */ + TriangulationFactor(const Camera& camera, const Point2& measured, + const SharedNoiseModel& model, Key pointKey, bool throwCheirality = false, + bool verboseCheirality = false) : + Base(model, pointKey), camera_(camera), measured_(measured), throwCheirality_( + throwCheirality), verboseCheirality_(verboseCheirality) { + if (model && model->dim() != 2) + throw std::invalid_argument( + "TriangulationFactor must be created with 2-dimensional noise model."); + } + + /** Virtual destructor */ + virtual ~TriangulationFactor() { + } + + /// @return a deep copy of this factor + virtual gtsam::NonlinearFactor::shared_ptr clone() const { + return boost::static_pointer_cast( + gtsam::NonlinearFactor::shared_ptr(new This(*this))); + } + + /** + * print + * @param s optional string naming the factor + * @param keyFormatter optional formatter useful for printing Symbols + */ + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const { + std::cout << s << "TriangulationFactor,"; + camera_.print("camera"); + measured_.print("z"); + Base::print("", keyFormatter); + } + + /// equals + virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + const This *e = dynamic_cast(&p); + return e && Base::equals(p, tol) && this->camera_.equals(e->camera_, tol) + && this->measured_.equals(e->measured_, tol); + } + + /// Evaluate error h(x)-z and optionally derivatives + Vector evaluateError(const Point3& point, boost::optional H2 = + boost::none) const { + try { + Point2 
error(camera_.project(point, boost::none, H2) - measured_); + return error.vector(); + } catch (CheiralityException& e) { + if (H2) + *H2 = zeros(2, 3); + if (verboseCheirality_) + std::cout << e.what() << ": Landmark " + << DefaultKeyFormatter(this->key()) << " moved behind camera" + << std::endl; + if (throwCheirality_) + throw e; + return ones(2) * 2.0 * camera_.calibration().fx(); + } + } + + /// thread-safe (?) scratch memory for linearize + mutable VerticalBlockMatrix Ab; + mutable Matrix A; + mutable Vector b; + + /** + * Linearize to a JacobianFactor, does not support constrained noise model ! + * \f$ Ax-b \approx h(x+\delta x)-z = h(x) + A \delta x - z \f$ + * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ + */ + boost::shared_ptr linearize(const Values& x) const { + // Only linearize if the factor is active + if (!this->active(x)) + return boost::shared_ptr(); + + // Allocate memory for Jacobian factor, do only once + if (Ab.rows() == 0) { + std::vector dimensions(1, 3); + Ab = VerticalBlockMatrix(dimensions, 2, true); + A.resize(2,3); + b.resize(2); + } + + // Would be even better if we could pass blocks to project + const Point3& point = x.at(key()); + b = -(camera_.project(point, boost::none, A) - measured_).vector(); + if (noiseModel_) + this->noiseModel_->WhitenSystem(A, b); + + Ab(0) = A; + Ab(1) = b; + + return boost::make_shared(this->keys_, Ab); + } + + /** return the measurement */ + const Point2& measured() const { + return measured_; + } + + /** return verbosity */ + inline bool verboseCheirality() const { + return verboseCheirality_; + } + + /** return flag for throwing cheirality exceptions */ + inline bool throwCheirality() const { + return throwCheirality_; + } + +private: + + /// Serialization function + friend class boost::serialization::access; + template + void serialize(ARCHIVE & ar, const unsigned int version) { + ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + ar & BOOST_SERIALIZATION_NVP(camera_); + ar & BOOST_SERIALIZATION_NVP(measured_); + ar & BOOST_SERIALIZATION_NVP(throwCheirality_); + ar & BOOST_SERIALIZATION_NVP(verboseCheirality_); + } +}; +} // \ namespace gtsam + diff --git a/gtsam_unstable/geometry/tests/testTriangulation.cpp b/gtsam_unstable/geometry/tests/testTriangulation.cpp index e537bdf58..8d45311f1 100644 --- a/gtsam_unstable/geometry/tests/testTriangulation.cpp +++ b/gtsam_unstable/geometry/tests/testTriangulation.cpp @@ -16,50 +16,46 @@ * Author: cbeall3 */ -#include - -#include -#include -#include -#include - -#include #include +#include +#include #include #include -#include using namespace std; using namespace gtsam; using namespace boost::assign; -/* ************************************************************************* */ +// Some common constants -TEST( triangulation, twoPosesBundler) { - boost::shared_ptr sharedCal = // - boost::make_shared(1500, 0, 0, 640, 480); - // create first camera. 
Looking along X-axis, 1 meter above ground plane (x-y) - Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2), - gtsam::Point3(0, 0, 1)); - PinholeCamera level_camera(level_pose, *sharedCal); +static const boost::shared_ptr sharedCal = // + boost::make_shared(1500, 1200, 0, 640, 480); - // create second camera 1 meter to the right of first camera - Pose3 level_pose_right = level_pose * Pose3(Rot3(), Point3(1, 0, 0)); - PinholeCamera level_camera_right(level_pose_right, *sharedCal); +// Looking along X-axis, 1 meter above ground plane (x-y) +static const Rot3 upright = Rot3::ypr(-M_PI / 2, 0., -M_PI / 2); +static const Pose3 pose1 = Pose3(upright, gtsam::Point3(0, 0, 1)); +PinholeCamera camera1(pose1, *sharedCal); - // landmark ~5 meters infront of camera - Point3 landmark(5, 0.5, 1.2); +// create second camera 1 meter to the right of first camera +static const Pose3 pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0)); +PinholeCamera camera2(pose2, *sharedCal); - // 1. Project two landmarks into two cameras and triangulate - Point2 level_uv = level_camera.project(landmark); - Point2 level_uv_right = level_camera_right.project(landmark); +// landmark ~5 meters infront of camera +static const Point3 landmark(5, 0.5, 1.2); - vector < Pose3 > poses; +// 1. Project two landmarks into two cameras and triangulate +Point2 z1 = camera1.project(landmark); +Point2 z2 = camera2.project(landmark); + +//****************************************************************************** +TEST( triangulation, twoPoses) { + + vector poses; vector measurements; - poses += level_pose, level_pose_right; - measurements += level_uv, level_uv_right; + poses += pose1, pose2; + measurements += z1, z2; bool optimize = true; double rank_tol = 1e-9; @@ -77,32 +73,48 @@ TEST( triangulation, twoPosesBundler) { EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2)); } -/* ************************************************************************* */ +//****************************************************************************** -TEST( triangulation, fourPoses) { - boost::shared_ptr sharedCal = // - boost::make_shared(1500, 1200, 0, 640, 480); - // create first camera. Looking along X-axis, 1 meter above ground plane (x-y) - Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2), - gtsam::Point3(0, 0, 1)); - SimpleCamera level_camera(level_pose, *sharedCal); +TEST( triangulation, twoPosesBundler) { - // create second camera 1 meter to the right of first camera - Pose3 level_pose_right = level_pose * Pose3(Rot3(), Point3(1, 0, 0)); - SimpleCamera level_camera_right(level_pose_right, *sharedCal); - - // landmark ~5 meters infront of camera - Point3 landmark(5, 0.5, 1.2); + boost::shared_ptr bundlerCal = // + boost::make_shared(1500, 0, 0, 640, 480); + PinholeCamera camera1(pose1, *bundlerCal); + PinholeCamera camera2(pose2, *bundlerCal); // 1. 
Project two landmarks into two cameras and triangulate - Point2 level_uv = level_camera.project(landmark); - Point2 level_uv_right = level_camera_right.project(landmark); + Point2 z1 = camera1.project(landmark); + Point2 z2 = camera2.project(landmark); - vector < Pose3 > poses; + vector poses; vector measurements; - poses += level_pose, level_pose_right; - measurements += level_uv, level_uv_right; + poses += pose1, pose2; + measurements += z1, z2; + + bool optimize = true; + double rank_tol = 1e-9; + + boost::optional triangulated_landmark = triangulatePoint3(poses, + bundlerCal, measurements, rank_tol, optimize); + EXPECT(assert_equal(landmark, *triangulated_landmark, 1e-2)); + + // 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + measurements.at(0) += Point2(0.1, 0.5); + measurements.at(1) += Point2(-0.2, 0.3); + + boost::optional triangulated_landmark_noise = triangulatePoint3(poses, + bundlerCal, measurements, rank_tol, optimize); + EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2)); +} + +//****************************************************************************** +TEST( triangulation, fourPoses) { + vector poses; + vector measurements; + + poses += pose1, pose2; + measurements += z1, z2; boost::optional triangulated_landmark = triangulatePoint3(poses, sharedCal, measurements); @@ -112,21 +124,20 @@ TEST( triangulation, fourPoses) { measurements.at(0) += Point2(0.1, 0.5); measurements.at(1) += Point2(-0.2, 0.3); - boost::optional triangulated_landmark_noise = triangulatePoint3(poses, - sharedCal, measurements); + boost::optional triangulated_landmark_noise = // + triangulatePoint3(poses, sharedCal, measurements); EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2)); // 3. Add a slightly rotated third camera above, again with measurement noise - Pose3 pose_top = level_pose - * Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); - SimpleCamera camera_top(pose_top, *sharedCal); - Point2 top_uv = camera_top.project(landmark); + Pose3 pose3 = pose1 * Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); + SimpleCamera camera3(pose3, *sharedCal); + Point2 z3 = camera3.project(landmark); - poses += pose_top; - measurements += top_uv + Point2(0.1, -0.1); + poses += pose3; + measurements += z3 + Point2(0.1, -0.1); - boost::optional triangulated_3cameras = triangulatePoint3(poses, - sharedCal, measurements); + boost::optional triangulated_3cameras = // + triangulatePoint3(poses, sharedCal, measurements); EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2)); // Again with nonlinear optimization @@ -135,48 +146,42 @@ TEST( triangulation, fourPoses) { EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2)); // 4. 
Test failure: Add a 4th camera facing the wrong way - Pose3 level_pose180 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2), - Point3(0, 0, 1)); - SimpleCamera camera_180(level_pose180, *sharedCal); + Pose3 pose4 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1)); + SimpleCamera camera4(pose4, *sharedCal); - CHECK_EXCEPTION(camera_180.project(landmark) ;, CheiralityException); +#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION + CHECK_EXCEPTION(camera4.project(landmark);, CheiralityException); - poses += level_pose180; + poses += pose4; measurements += Point2(400, 400); CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), TriangulationCheiralityException); +#endif } -/* ************************************************************************* */ - +//****************************************************************************** TEST( triangulation, fourPoses_distinct_Ks) { Cal3_S2 K1(1500, 1200, 0, 640, 480); // create first camera. Looking along X-axis, 1 meter above ground plane (x-y) - Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2), - gtsam::Point3(0, 0, 1)); - SimpleCamera level_camera(level_pose, K1); + SimpleCamera camera1(pose1, K1); // create second camera 1 meter to the right of first camera - Pose3 level_pose_right = level_pose * Pose3(Rot3(), Point3(1, 0, 0)); Cal3_S2 K2(1600, 1300, 0, 650, 440); - SimpleCamera level_camera_right(level_pose_right, K2); - - // landmark ~5 meters infront of camera - Point3 landmark(5, 0.5, 1.2); + SimpleCamera camera2(pose2, K2); // 1. Project two landmarks into two cameras and triangulate - Point2 level_uv = level_camera.project(landmark); - Point2 level_uv_right = level_camera_right.project(landmark); + Point2 z1 = camera1.project(landmark); + Point2 z2 = camera2.project(landmark); vector cameras; vector measurements; - cameras += level_camera, level_camera_right; - measurements += level_uv, level_uv_right; + cameras += camera1, camera2; + measurements += z1, z2; - boost::optional triangulated_landmark = triangulatePoint3(cameras, - measurements); + boost::optional triangulated_landmark = // + triangulatePoint3(cameras, measurements); EXPECT(assert_equal(landmark, *triangulated_landmark, 1e-2)); // 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) @@ -188,17 +193,16 @@ TEST( triangulation, fourPoses_distinct_Ks) { EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2)); // 3. Add a slightly rotated third camera above, again with measurement noise - Pose3 pose_top = level_pose - * Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); + Pose3 pose3 = pose1 * Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); Cal3_S2 K3(700, 500, 0, 640, 480); - SimpleCamera camera_top(pose_top, K3); - Point2 top_uv = camera_top.project(landmark); + SimpleCamera camera3(pose3, K3); + Point2 z3 = camera3.project(landmark); - cameras += camera_top; - measurements += top_uv + Point2(0.1, -0.1); + cameras += camera3; + measurements += z3 + Point2(0.1, -0.1); - boost::optional triangulated_3cameras = triangulatePoint3(cameras, - measurements); + boost::optional triangulated_3cameras = // + triangulatePoint3(cameras, measurements); EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2)); // Again with nonlinear optimization @@ -207,47 +211,40 @@ TEST( triangulation, fourPoses_distinct_Ks) { EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2)); // 4. 
Test failure: Add a 4th camera facing the wrong way - Pose3 level_pose180 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2), - Point3(0, 0, 1)); + Pose3 pose4 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1)); Cal3_S2 K4(700, 500, 0, 640, 480); - SimpleCamera camera_180(level_pose180, K4); + SimpleCamera camera4(pose4, K4); - CHECK_EXCEPTION(camera_180.project(landmark) ;, CheiralityException); +#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION + CHECK_EXCEPTION(camera4.project(landmark);, CheiralityException); - cameras += camera_180; + cameras += camera4; measurements += Point2(400, 400); CHECK_EXCEPTION(triangulatePoint3(cameras, measurements), TriangulationCheiralityException); +#endif } -/* ************************************************************************* */ - +//****************************************************************************** TEST( triangulation, twoIdenticalPoses) { - boost::shared_ptr sharedCal = // - boost::make_shared(1500, 1200, 0, 640, 480); // create first camera. Looking along X-axis, 1 meter above ground plane (x-y) - Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2), - gtsam::Point3(0, 0, 1)); - SimpleCamera level_camera(level_pose, *sharedCal); - - // landmark ~5 meters infront of camera - Point3 landmark(5, 0.5, 1.2); + SimpleCamera camera1(pose1, *sharedCal); // 1. Project two landmarks into two cameras and triangulate - Point2 level_uv = level_camera.project(landmark); + Point2 z1 = camera1.project(landmark); - vector < Pose3 > poses; + vector poses; vector measurements; - poses += level_pose, level_pose; - measurements += level_uv, level_uv; + poses += pose1, pose1; + measurements += z1, z1; CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), TriangulationUnderconstrainedException); } -/* ************************************************************************* * - +//****************************************************************************** +/* TEST( triangulation, onePose) { // we expect this test to fail with a TriangulationUnderconstrainedException // because there's only one camera observation @@ -265,9 +262,32 @@ TEST( triangulation, twoIdenticalPoses) { } */ - /* ************************************************************************* */ +//****************************************************************************** +TEST( triangulation, TriangulationFactor ) { + // Create the factor with a measurement that is 3 pixels off in x + Key pointKey(1); + SharedNoiseModel model; + typedef TriangulationFactor<> Factor; + Factor factor(camera1, z1, model, pointKey, sharedCal); + + // Use the factor to calculate the Jacobians + Matrix HActual; + factor.evaluateError(landmark, HActual); + +// Matrix expectedH1 = numericalDerivative11( +// boost::bind(&EssentialMatrixConstraint::evaluateError, &factor, _1, pose2, +// boost::none, boost::none), pose1); + // The expected Jacobian + Matrix HExpected = numericalDerivative11( + boost::bind(&Factor::evaluateError, &factor, _1, boost::none), landmark); + + // Verify the Jacobians are correct + CHECK(assert_equal(HExpected, HActual, 1e-3)); +} + +//****************************************************************************** int main() { TestResult tr; return TestRegistry::runAllTests(tr); } -/* ************************************************************************* */ +//****************************************************************************** diff --git a/gtsam_unstable/geometry/triangulation.cpp b/gtsam_unstable/geometry/triangulation.cpp index 559871059..3017fdf7a 100644 
--- a/gtsam_unstable/geometry/triangulation.cpp +++ b/gtsam_unstable/geometry/triangulation.cpp @@ -18,6 +18,9 @@ #include +#include +#include + namespace gtsam { /** diff --git a/gtsam_unstable/geometry/triangulation.h b/gtsam_unstable/geometry/triangulation.h index 0cb9d4838..f767514c1 100644 --- a/gtsam_unstable/geometry/triangulation.h +++ b/gtsam_unstable/geometry/triangulation.h @@ -18,19 +18,11 @@ #pragma once -#include -#include -#include -#include -#include -#include -#include -#include #include - -#include -#include -#include +#include +#include +#include +#include #include @@ -60,10 +52,9 @@ public: * @param rank_tol SVD rank tolerance * @return Triangulated Point3 */ -GTSAM_UNSTABLE_EXPORT Point3 triangulateDLT(const std::vector& projection_matrices, const std::vector& measurements, double rank_tol); - -// Frank says: putting priors on poses and then optimizing is a terrible idea: we turn a 3dof problem into a much more difficult problem -// We should have a projectionfactor that knows pose is fixed +GTSAM_UNSTABLE_EXPORT Point3 triangulateDLT( + const std::vector& projection_matrices, + const std::vector& measurements, double rank_tol); /// /** @@ -87,10 +78,9 @@ std::pair triangulationGraph( static SharedNoiseModel prior_model(noiseModel::Isotropic::Sigma(6, 1e-6)); for (size_t i = 0; i < measurements.size(); i++) { const Pose3& pose_i = poses[i]; - graph.push_back(GenericProjectionFactor // - (measurements[i], unit2, i, landmarkKey, sharedCal)); - graph.push_back(PriorFactor(i, pose_i, prior_model)); - values.insert(i, pose_i); + PinholeCamera camera_i(pose_i, *sharedCal); + graph.push_back(TriangulationFactor // + (camera_i, measurements[i], unit2, landmarkKey)); } return std::make_pair(graph, values); } @@ -116,13 +106,8 @@ std::pair triangulationGraph( static SharedNoiseModel prior_model(noiseModel::Isotropic::Sigma(6, 1e-6)); for (size_t i = 0; i < measurements.size(); i++) { const PinholeCamera& camera_i = cameras[i]; - boost::shared_ptr // Seems wasteful to create new object - sharedCal(new CALIBRATION(camera_i.calibration())); - graph.push_back(GenericProjectionFactor // - (measurements[i], unit2, i, landmarkKey, sharedCal)); - const Pose3& pose_i = camera_i.pose(); - graph.push_back(PriorFactor(i, pose_i, prior_model)); - values.insert(i, pose_i); + graph.push_back(TriangulationFactor // + (camera_i, measurements[i], unit2, landmarkKey)); } return std::make_pair(graph, values); } @@ -135,7 +120,8 @@ std::pair triangulationGraph( * @param landmarkKey to refer to landmark * @return refined Point3 */ -GTSAM_UNSTABLE_EXPORT Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, Key landmarkKey); +GTSAM_UNSTABLE_EXPORT Point3 optimize(const NonlinearFactorGraph& graph, + const Values& values, Key landmarkKey); /** * Given an initial estimate , refine a point using measurements in several cameras @@ -221,7 +207,7 @@ Point3 triangulatePoint3(const std::vector& poses, BOOST_FOREACH(const Pose3& pose, poses) { const Point3& p_local = pose.transform_to(point); if (p_local.z() <= 0) - throw(TriangulationCheiralityException()); + throw(TriangulationCheiralityException()); } #endif @@ -271,7 +257,7 @@ Point3 triangulatePoint3( BOOST_FOREACH(const Camera& camera, cameras) { const Point3& p_local = camera.pose().transform_to(point); if (p_local.z() <= 0) - throw(TriangulationCheiralityException()); + throw(TriangulationCheiralityException()); } #endif diff --git a/gtsam_unstable/slam/AHRS.cpp b/gtsam_unstable/slam/AHRS.cpp index 848d3ae09..af562c1b2 100644 --- 
diff --git a/gtsam_unstable/slam/AHRS.cpp b/gtsam_unstable/slam/AHRS.cpp
index 848d3ae09..af562c1b2 100644
--- a/gtsam_unstable/slam/AHRS.cpp
+++ b/gtsam_unstable/slam/AHRS.cpp
@@ -94,17 +94,17 @@ std::pair AHRS::initialize(double g_e)
   Matrix P12 = -Omega_T * H_g * Pa;
 
   Matrix P_plus_k2 = Matrix(9, 9);
-  P_plus_k2.block(0, 0, 3, 3) = P11;
-  P_plus_k2.block(0, 3, 3, 3) = Z3;
-  P_plus_k2.block(0, 6, 3, 3) = P12;
+  P_plus_k2.block<3,3>(0, 0) = P11;
+  P_plus_k2.block<3,3>(0, 3) = Z3;
+  P_plus_k2.block<3,3>(0, 6) = P12;
 
-  P_plus_k2.block(3, 0, 3, 3) = Z3;
-  P_plus_k2.block(3, 3, 3, 3) = Pg_;
-  P_plus_k2.block(3, 6, 3, 3) = Z3;
+  P_plus_k2.block<3,3>(3, 0) = Z3;
+  P_plus_k2.block<3,3>(3, 3) = Pg_;
+  P_plus_k2.block<3,3>(3, 6) = Z3;
 
-  P_plus_k2.block(6, 0, 3, 3) = trans(P12);
-  P_plus_k2.block(6, 3, 3, 3) = Z3;
-  P_plus_k2.block(6, 6, 3, 3) = Pa;
+  P_plus_k2.block<3,3>(6, 0) = trans(P12);
+  P_plus_k2.block<3,3>(6, 3) = Z3;
+  P_plus_k2.block<3,3>(6, 6) = Pa;
 
   Vector dx = zero(9);
   KalmanFilter::State state = KF_.init(dx, P_plus_k2);
@@ -127,19 +127,20 @@ std::pair AHRS::integrate(
   Matrix Z3 = zeros(3, 3);
 
   Matrix F_k = zeros(9, 9);
-  F_k.block(0, 3, 3, 3) = -bRn;
-  F_k.block(3, 3, 3, 3) = F_g_;
-  F_k.block(6, 6, 3, 3) = F_a_;
+  F_k.block<3,3>(0, 3) = -bRn;
+  F_k.block<3,3>(3, 3) = F_g_;
+  F_k.block<3,3>(6, 6) = F_a_;
 
   Matrix G_k = zeros(9, 12);
-  G_k.block(0, 0, 3, 3) = -bRn;
-  G_k.block(0, 6, 3, 3) = -bRn;
-  G_k.block(3, 3, 3, 3) = I3;
-  G_k.block(6, 9, 3, 3) = I3;
+  G_k.block<3,3>(0, 0) = -bRn;
+  G_k.block<3,3>(0, 6) = -bRn;
+  G_k.block<3,3>(3, 3) = I3;
+  G_k.block<3,3>(6, 9) = I3;
 
   Matrix Q_k = G_k * var_w_ * trans(G_k);
   Matrix Psi_k = eye(9) + dt * F_k; // +dt*dt*F_k*F_k/2; // transition matrix
 
+  // This implements update in section 10.6
   Matrix B = zeros(9, 9);
   Vector u2 = zero(9);
   KalmanFilter::State newState = KF_.predictQ(state, Psi_k,B,u2,dt*Q_k);
@@ -148,21 +149,21 @@ std::pair AHRS::integrate(
 
 /* ************************************************************************* */
 bool AHRS::isAidingAvailable(const Mechanization_bRn2& mech,
-    const gtsam::Vector& f, const gtsam::Vector& u, double ge) {
+    const gtsam::Vector& f, const gtsam::Vector& u, double ge) const {
 
   // Subtract the biases
   Vector f_ = f - mech.x_a();
   Vector u_ = u - mech.x_g();
 
-  double mu_f = f_.norm() - ge;
-  double mu_u = u_.norm();
-  return (fabs(mu_f)<0.5 && mu_u < 5.0 / 180.0 * 3.1415926);
+  double mu_f = f_.norm() - ge; // accelerometer same magnitude as local gravity ?
+  double mu_u = u_.norm(); // gyro says we are not maneuvering ?
+  return (fabs(mu_f)<0.5 && mu_u < 5.0 / 180.0 * M_PI);
 }
 
 /* ************************************************************************* */
 std::pair AHRS::aid(
     const Mechanization_bRn2& mech, KalmanFilter::State state,
-    const Vector& f, bool Farrell) {
+    const Vector& f, bool Farrell) const {
 
   Matrix bRn = mech.bRn().matrix();
 
@@ -210,7 +211,7 @@ std::pair AHRS::aid(
 std::pair AHRS::aidGeneral(
     const Mechanization_bRn2& mech, KalmanFilter::State state,
     const Vector& f, const Vector& f_previous,
-    const Rot3& R_previous) {
+    const Rot3& R_previous) const {
 
   Matrix increment = R_previous.between(mech.bRn()).matrix();
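The AHRS.cpp hunks above swap dynamic-size block(i, j, 3, 3) accesses for fixed-size block<3,3>(i, j). Both forms write the same 3x3 sub-matrix; the templated form gives Eigen compile-time sizes it can unroll. A standalone, Eigen-only sketch of the two forms (illustrative variable names):

// Sketch: dynamic vs. fixed-size Eigen blocks, as in AHRS::initialize.
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd P = Eigen::MatrixXd::Zero(9, 9);
  Eigen::Matrix3d P11 = Eigen::Matrix3d::Identity();

  P.block(0, 0, 3, 3) = P11;   // dynamic-size block: dimensions resolved at run time
  P.block<3, 3>(0, 6) = P11;   // fixed-size block: dimensions known at compile time

  // Identical result; the fixed-size copy can be fully unrolled by the compiler.
  return 0;
}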
diff --git a/gtsam_unstable/slam/AHRS.h b/gtsam_unstable/slam/AHRS.h
index 1d7eb85f5..81d74a9f5 100644
--- a/gtsam_unstable/slam/AHRS.h
+++ b/gtsam_unstable/slam/AHRS.h
@@ -52,15 +52,22 @@ public:
       const Vector& u, double dt);
 
   bool isAidingAvailable(const Mechanization_bRn2& mech,
-      const Vector& f, const Vector& u, double ge);
+      const Vector& f, const Vector& u, double ge) const;
 
+  /**
+   * Aid the AHRS with an accelerometer reading, typically called when isAidingAvailable == true
+   * @param mech current mechanization state
+   * @param state current Kalman filter state
+   * @param f accelerometer reading
+   * @param Farrell
+   */
   std::pair aid(
       const Mechanization_bRn2& mech, KalmanFilter::State state,
-      const Vector& f, bool Farrell=0);
+      const Vector& f, bool Farrell=0) const;
 
   std::pair aidGeneral(
       const Mechanization_bRn2& mech, KalmanFilter::State state,
-      const Vector& f, const Vector& f_expected, const Rot3& R_previous);
+      const Vector& f, const Vector& f_expected, const Rot3& R_previous) const;
 
   /// print
   void print(const std::string& s = "") const;
diff --git a/gtsam_unstable/slam/Mechanization_bRn2.h b/gtsam_unstable/slam/Mechanization_bRn2.h
index 747ceafe7..76e4e9edd 100644
--- a/gtsam_unstable/slam/Mechanization_bRn2.h
+++ b/gtsam_unstable/slam/Mechanization_bRn2.h
@@ -34,14 +34,20 @@ public:
       bRn_(initial_bRn), x_g_(initial_x_g), x_a_(initial_x_a) {
   }
 
-  Vector b_g(double g_e) {
-    Vector n_g = (Vector(3) << 0.0, 0.0, g_e);
+  /// Copy constructor
+  Mechanization_bRn2(const Mechanization_bRn2& other) :
+      bRn_(other.bRn_), x_g_(other.x_g_), x_a_(other.x_a_) {
+  }
+
+  /// gravity in the body frame
+  Vector b_g(double g_e) const {
+    Vector n_g = (Vector(3) << 0, 0, g_e);
     return (bRn_ * n_g).vector();
   }
 
-  Rot3 bRn() const {return bRn_; }
-  Vector x_g() const { return x_g_; }
-  Vector x_a() const { return x_a_; }
+  const Rot3& bRn() const {return bRn_; }
+  const Vector& x_g() const { return x_g_; }
+  const Vector& x_a() const { return x_a_; }
 
   /**
    * Initialize the first Mechanization state
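The isAidingAvailable check documented above accepts an accelerometer reading for aiding only when the bias-corrected specific force is within 0.5 m/s^2 of local gravity and the bias-corrected gyro rate stays below 5 deg/s. A standalone sketch of that heuristic with made-up readings (bias subtraction omitted; not library code):

// Sketch of the aiding heuristic used by AHRS::isAidingAvailable.
#include <cmath>
#include <Eigen/Dense>

bool aidingAvailable(const Eigen::Vector3d& f, const Eigen::Vector3d& u, double g_e) {
  double mu_f = f.norm() - g_e;           // specific force close to gravity magnitude?
  double mu_u = u.norm();                 // rotation rate close to zero?
  return std::fabs(mu_f) < 0.5 && mu_u < 5.0 / 180.0 * M_PI;
}

int main() {
  Eigen::Vector3d f(0.1, -0.2, 9.78);     // nearly static accelerometer reading
  Eigen::Vector3d u(0.01, 0.0, 0.02);     // about 1.3 deg/s total rotation rate
  bool ok = aidingAvailable(f, u, 9.81);  // true: both thresholds satisfied
  return ok ? 0 : 1;
}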
diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp
index 36da0fb0e..bba13d6eb 100644
--- a/tests/testNonlinearOptimizer.cpp
+++ b/tests/testNonlinearOptimizer.cpp
@@ -237,23 +237,105 @@ TEST(NonlinearOptimizer, NullFactor) {
 TEST(NonlinearOptimizer, MoreOptimization) {
 
   NonlinearFactorGraph fg;
-  fg += PriorFactor(0, Pose2(0,0,0), noiseModel::Isotropic::Sigma(3,1));
-  fg += BetweenFactor(0, 1, Pose2(1,0,M_PI/2), noiseModel::Isotropic::Sigma(3,1));
-  fg += BetweenFactor(1, 2, Pose2(1,0,M_PI/2), noiseModel::Isotropic::Sigma(3,1));
+  fg += PriorFactor(0, Pose2(0, 0, 0),
+      noiseModel::Isotropic::Sigma(3, 1));
+  fg += BetweenFactor(0, 1, Pose2(1, 0, M_PI / 2),
+      noiseModel::Isotropic::Sigma(3, 1));
+  fg += BetweenFactor(1, 2, Pose2(1, 0, M_PI / 2),
+      noiseModel::Isotropic::Sigma(3, 1));
 
   Values init;
-  init.insert(0, Pose2(3,4,-M_PI));
-  init.insert(1, Pose2(10,2,-M_PI));
-  init.insert(2, Pose2(11,7,-M_PI));
+  init.insert(0, Pose2(3, 4, -M_PI));
+  init.insert(1, Pose2(10, 2, -M_PI));
+  init.insert(2, Pose2(11, 7, -M_PI));
 
   Values expected;
-  expected.insert(0, Pose2(0,0,0));
-  expected.insert(1, Pose2(1,0,M_PI/2));
-  expected.insert(2, Pose2(1,1,M_PI));
+  expected.insert(0, Pose2(0, 0, 0));
+  expected.insert(1, Pose2(1, 0, M_PI / 2));
+  expected.insert(2, Pose2(1, 1, M_PI));
+
+  VectorValues expectedGradient;
+  expectedGradient.insert(0,zero(3));
+  expectedGradient.insert(1,zero(3));
+  expectedGradient.insert(2,zero(3));
 
   // Try LM and Dogleg
-  EXPECT(assert_equal(expected, LevenbergMarquardtOptimizer(fg, init).optimize()));
+  LevenbergMarquardtParams params;
+//  params.setVerbosityLM("TRYDELTA");
+//  params.setVerbosity("TERMINATION");
+  params.setlambdaUpperBound(1e9);
+//  params.setRelativeErrorTol(0);
+//  params.setAbsoluteErrorTol(0);
+  //params.setlambdaInitial(10);
+
+  {
+    LevenbergMarquardtOptimizer optimizer(fg, init, params);
+
+    // test convergence
+    Values actual = optimizer.optimize();
+    EXPECT(assert_equal(expected, actual));
+
+    // Check that the gradient is zero
+    GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
+    EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
+  }
   EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize()));
+
+//  cout << "===================================================================================" << endl;
+
+  // Try LM with diagonal damping
+  Values initBetter = init;
+  // initBetter.insert(0, Pose2(3,4,0));
+  // initBetter.insert(1, Pose2(10,2,M_PI/3));
+  // initBetter.insert(2, Pose2(11,7,M_PI/2));
+
+  {
+//    params.setDiagonalDamping(true);
+//    LevenbergMarquardtOptimizer optimizer(fg, initBetter, params);
+//
+//    // test the diagonal
+//    GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
+//    GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear);
+//    VectorValues d = linear->hessianDiagonal(), //
+//    expectedDiagonal = d + params.lambdaInitial * d;
+//    EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal()));
+//
+//    // test convergence (does not!)
+//    Values actual = optimizer.optimize();
+//    EXPECT(assert_equal(expected, actual));
+//
+//    // Check that the gradient is zero (it is not!)
+//    linear = optimizer.linearize();
+//    EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
+//
+//    // Check that the gradient is zero for damped system (it is not!)
+//    damped = optimizer.buildDampedSystem(*linear);
+//    VectorValues actualGradient = damped.gradientAtZero();
+//    EXPECT(assert_equal(expectedGradient,actualGradient));
+//
+//    // Check errors at convergence and errors in direction of gradient (decreases!)
+//    EXPECT_DOUBLES_EQUAL(46.02558,fg.error(actual),1e-5);
+//    EXPECT_DOUBLES_EQUAL(44.742237,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
+//
+//    // Check that solve yields gradient (it's not equal to gradient, as predicted)
+//    VectorValues delta = damped.optimize();
+//    double factor = actualGradient[0][0]/delta[0][0];
+//    EXPECT(assert_equal(actualGradient,factor*delta));
+//
+//    // Still pointing downhill wrt actual gradient !
+//    EXPECT_DOUBLES_EQUAL( 0.1056157,dot(-1*actualGradient,delta),1e-3);
+//
+//    // delta.print("This is the delta value computed by LM with diagonal damping");
+//
+//    // Still pointing downhill wrt expected gradient (IT DOES NOT! actually they are perpendicular)
+//    EXPECT_DOUBLES_EQUAL( 0.0,dot(-1*expectedGradient,delta),1e-5);
+//
+//    // Check errors at convergence and errors in direction of solution (does not decrease!)
+//    EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
+//
+//    // Check errors at convergence and errors at a small step in direction of solution (does not decrease!)
+//    EXPECT_DOUBLES_EQUAL(46.0255,fg.error(actual.retract(0.01*delta)),1e-3);
+  }
 }
 
 /* ************************************************************************* */
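The MoreOptimization test above raises lambdaUpperBound on LevenbergMarquardtParams and then checks that the gradient of the linearized system vanishes at the solution. A condensed sketch of the same pattern, using only the calls that appear in the test; the header paths are assumptions and may differ between GTSAM versions.

// Sketch: run LM with a raised lambda bound and inspect the gradient at the optimum.
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <cmath>

using namespace gtsam;

int main() {
  NonlinearFactorGraph fg;
  fg += PriorFactor<Pose2>(0, Pose2(0, 0, 0), noiseModel::Isotropic::Sigma(3, 1));
  fg += BetweenFactor<Pose2>(0, 1, Pose2(1, 0, M_PI / 2), noiseModel::Isotropic::Sigma(3, 1));

  Values init;
  init.insert(0, Pose2(3, 4, -M_PI));
  init.insert(1, Pose2(10, 2, -M_PI));

  LevenbergMarquardtParams params;
  params.setlambdaUpperBound(1e9);  // allow very large damping before giving up

  LevenbergMarquardtOptimizer optimizer(fg, init, params);
  Values actual = optimizer.optimize();

  // At a local minimum the gradient of the linearization is numerically zero.
  GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
  VectorValues gradient = linear->gradientAtZero();
  return 0;
}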