From 1ce95c1d89d30ac64747acf087c2c5e5e7840475 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 15 May 2012 15:49:14 +0000 Subject: [PATCH] Removed LDL in favor of Cholesky --- examples/elaboratePoint2KalmanFilter.cpp | 2 +- gtsam/base/cholesky.cpp | 67 --------- gtsam/base/cholesky.h | 19 --- gtsam/base/tests/testCholesky.cpp | 125 ---------------- gtsam/linear/GaussianBayesNet.h | 2 - gtsam/linear/GaussianConditional.cpp | 7 +- gtsam/linear/GaussianConditional.h | 26 +--- gtsam/linear/GaussianDensity.cpp | 1 - gtsam/linear/GaussianFactorGraph.cpp | 141 ------------------ gtsam/linear/GaussianFactorGraph.h | 43 ------ gtsam/linear/GaussianMultifrontalSolver.cpp | 12 +- gtsam/linear/GaussianMultifrontalSolver.h | 2 +- gtsam/linear/GaussianSequentialSolver.cpp | 10 +- gtsam/linear/GaussianSequentialSolver.h | 2 +- gtsam/linear/HessianFactor.cpp | 19 +-- gtsam/linear/HessianFactor.h | 8 +- gtsam/linear/JacobianFactor.cpp | 1 - gtsam/linear/KalmanFilter.cpp | 2 +- gtsam/linear/KalmanFilter.h | 4 +- .../linear/tests/testGaussianConditional.cpp | 43 +----- .../linear/tests/testGaussianJunctionTree.cpp | 62 +++++--- gtsam/linear/tests/testHessianFactor.cpp | 14 +- gtsam/linear/tests/testKalmanFilter.cpp | 6 +- gtsam/nonlinear/ExtendedKalmanFilter.h | 2 +- gtsam/nonlinear/ISAM2-impl.cpp | 4 +- gtsam/nonlinear/ISAM2-impl.h | 2 +- gtsam/nonlinear/ISAM2.cpp | 4 +- gtsam/nonlinear/ISAM2.h | 14 +- .../SuccessiveLinearizationOptimizer.h | 9 +- tests/testGaussianFactorGraphB.cpp | 2 +- tests/testGaussianJunctionTreeB.cpp | 4 +- tests/testNonlinearOptimizer.cpp | 7 +- 32 files changed, 98 insertions(+), 568 deletions(-) diff --git a/examples/elaboratePoint2KalmanFilter.cpp b/examples/elaboratePoint2KalmanFilter.cpp index ee0d82501..0fc4db330 100644 --- a/examples/elaboratePoint2KalmanFilter.cpp +++ b/examples/elaboratePoint2KalmanFilter.cpp @@ -35,7 +35,7 @@ using namespace gtsam; int main() { - // [code below basically does SRIF with LDL] + // [code below basically does SRIF with Cholesky] // Create a factor graph to perform the inference GaussianFactorGraph::shared_ptr linearFactorGraph(new GaussianFactorGraph); diff --git a/gtsam/base/cholesky.cpp b/gtsam/base/cholesky.cpp index c69993d45..66bbbde55 100644 --- a/gtsam/base/cholesky.cpp +++ b/gtsam/base/cholesky.cpp @@ -155,71 +155,4 @@ void choleskyPartial(Matrix& ABC, size_t nFrontal) { toc(3, "compute L"); } -/* ************************************************************************* */ -Eigen::LDLT::TranspositionType ldlPartial(Matrix& ABC, size_t nFrontal) { - - const bool debug = ISDEBUG("ldlPartial"); - - assert(ABC.rows() == ABC.cols()); - assert(ABC.rows() >= 0 && nFrontal <= size_t(ABC.rows())); - - const size_t n = ABC.rows(); - - if(debug) { - cout << "Partial LDL with " << nFrontal << " frontal scalars, "; - print(ABC, "LDL ABC: "); - } - - // Compute Cholesky factorization of A, overwrites A = sqrt(D)*R - // tic(1, "ldl"); - Eigen::LDLT ldlt; - ldlt.compute(ABC.block(0,0,nFrontal,nFrontal).selfadjointView()); - if (debug) ldlt.isNegative() ? 
cout << "Matrix is negative" << endl : cout << "Matrix is not negative" << endl; - - Vector D = ldlt.vectorD(); - if(dampUnderconstrained) { - D = D.array().max(Vector::Constant(D.rows(), D.cols(), underconstrainedPrior).array()); - } - - Vector sqrtD = D.cwiseSqrt(); // FIXME: we shouldn't do sqrt in LDL - if (debug) cout << "LDL Dsqrt:\n" << sqrtD << endl; - - if(D.unaryExpr(boost::bind(less(), _1, 0.0)).any()) { - if(ISDEBUG("detailed_exceptions")) - throw NegativeMatrixException(NegativeMatrixException::Detail(ABC, ldlt.matrixU(), ldlt.vectorD())); - else - throw NegativeMatrixException(); - } - - // U = sqrtD * L^ - Matrix U = ldlt.matrixU(); - if(debug) cout << "LDL U:\n" << U << endl; - - // we store the permuted upper triangular matrix - ABC.block(0,0,nFrontal,nFrontal) = sqrtD.asDiagonal() * U; // FIXME: this isn't actually LDL', it's Cholesky - if(debug) cout << "LDL R:\n" << ABC.topLeftCorner(nFrontal,nFrontal) << endl; - // toc(1, "ldl"); - - // Compute S = inv(R') * B = inv(P'U')*B = inv(U')*P*B - // tic(2, "compute S"); - if(n - nFrontal > 0) { - ABC.topRightCorner(nFrontal, n-nFrontal) = ldlt.transpositionsP() * ABC.topRightCorner(nFrontal, n-nFrontal); - ABC.block(0,0,nFrontal,nFrontal).triangularView().transpose().solveInPlace(ABC.topRightCorner(nFrontal, n-nFrontal)); - } - if(debug) cout << "LDL S:\n" << ABC.topRightCorner(nFrontal, n-nFrontal) << endl; - // toc(2, "compute S"); - - // Compute L = C - S' * S - // tic(3, "compute L"); - if(debug) cout << "LDL C:\n" << Eigen::MatrixXd(ABC.bottomRightCorner(n-nFrontal,n-nFrontal).selfadjointView()) << endl; - if(n - nFrontal > 0) - ABC.bottomRightCorner(n-nFrontal,n-nFrontal).selfadjointView().rankUpdate( - ABC.topRightCorner(nFrontal, n-nFrontal).transpose(), -1.0); - if(debug) cout << "LDL L:\n" << Eigen::MatrixXd(ABC.bottomRightCorner(n-nFrontal,n-nFrontal).selfadjointView()) << endl; - // toc(3, "compute L"); - - if(debug) cout << "LDL P: " << ldlt.transpositionsP().indices() << endl; - return ldlt.transpositionsP(); -} - } diff --git a/gtsam/base/cholesky.h b/gtsam/base/cholesky.h index fe0e68156..50d3b24a8 100644 --- a/gtsam/base/cholesky.h +++ b/gtsam/base/cholesky.h @@ -90,24 +90,5 @@ std::pair choleskyCareful(Matrix& ATA, int order = -1); */ void choleskyPartial(Matrix& ABC, size_t nFrontal); -/** - * Partial LDL computes a factor [R S such that [P'R' 0 [RP S = [A B - * 0 F] S' I] 0 F] B' C]. - * The input to this function is the matrix ABC = [A B], and the parameter - * [B' C] - * nFrontal determines the split between A, B, and C, with A being of size - * nFrontal x nFrontal. - * P is a permutation matrix obtained from the pivoting process while doing LDL'. - * - * Specifically, if A = P'U'DUP is the LDL' factorization of A, - * then R = sqrt(D)*U is the permuted upper-triangular matrix. - * The permutation is returned so that it can be used in the backsubstitution - * process to return the correct order of variables, and in creating the - * Jacobian factor by permuting R correctly. - * - * Note that S and F are not permuted (in correct original ordering). 
- */ -Eigen::LDLT::TranspositionType ldlPartial(Matrix& ABC, size_t nFrontal); - } diff --git a/gtsam/base/tests/testCholesky.cpp b/gtsam/base/tests/testCholesky.cpp index 5232fc6ba..63fdec810 100644 --- a/gtsam/base/tests/testCholesky.cpp +++ b/gtsam/base/tests/testCholesky.cpp @@ -53,113 +53,6 @@ TEST(cholesky, choleskyPartial) { EXPECT(assert_equal(expected, actual, 1e-9)); } -/* ************************************************************************* */ -TEST(cholesky, ldlPartial) { - - // choleskyPartial should only use the upper triangle, so this represents a - // symmetric matrix. - Matrix ABC = Matrix_(7,7, - 4.0375, 3.4584, 3.5735, 2.4815, 2.1471, 2.7400, 2.2063, - 0., 4.7267, 3.8423, 2.3624, 2.8091, 2.9579, 2.5914, - 0., 0., 5.1600, 2.0797, 3.4690, 3.2419, 2.9992, - 0., 0., 0., 1.8786, 1.0535, 1.4250, 1.3347, - 0., 0., 0., 0., 3.0788, 2.6283, 2.3791, - 0., 0., 0., 0., 0., 2.9227, 2.4056, - 0., 0., 0., 0., 0., 0., 2.5776); - - // Do partial Cholesky on 3 frontal scalar variables - Matrix RSL(ABC); - Eigen::LDLT::TranspositionType permutation = ldlPartial(RSL, 3); - - // See the function comment for choleskyPartial, this decomposition should hold. - Matrix R1 = RSL.transpose(); - Matrix R2 = RSL; - R1.block(3, 3, 4, 4).setIdentity(); - - R2.block(3, 3, 4, 4) = R2.block(3, 3, 4, 4).selfadjointView(); - - // un-permute the permuted upper triangular part - R1.topLeftCorner(3,3) = permutation.transpose() * R1.topLeftCorner(3,3); - R2.topLeftCorner(3,3) = R2.topLeftCorner(3,3)*permutation; - - Matrix actual = R1 * R2; - - Matrix expected = ABC.selfadjointView(); - EXPECT(assert_equal(expected, actual, 1e-9)); -} - -/* ************************************************************************* */ -TEST(cholesky, ldlPartial2) { - // choleskyPartial should only use the upper triangle, so this represents a - // symmetric matrix. - Matrix ABC = Matrix_(7,7, - 4.0375, 3.4584, 3.5735, 2.4815, 2.1471, 2.7400, 2.2063, - 0., 4.7267, 3.8423, 2.3624, 2.8091, 2.9579, 2.5914, - 0., 0., 5.1600, 2.0797, 3.4690, 3.2419, 2.9992, - 0., 0., 0., 1.8786, 1.0535, 1.4250, 1.3347, - 0., 0., 0., 0., 3.0788, 2.6283, 2.3791, - 0., 0., 0., 0., 0., 2.9227, 2.4056, - 0., 0., 0., 0., 0., 0., 2.5776); - - // Do partial Cholesky on 3 frontal scalar variables - Matrix RSL(ABC); - Eigen::LDLT::TranspositionType permutation = ldlPartial(RSL, 6); - - // Compute permuted R (note transposed permutation - seems to be an inconsistency in Eigen!) - Matrix Rp = RSL.topLeftCorner(6,6) * permutation.transpose(); - - Matrix expected = Matrix(ABC.selfadjointView()).topLeftCorner(6,6); - Matrix actual = Rp.transpose() * Rp; - EXPECT(assert_equal(expected, actual)); - - // Directly call LDLT and reconstruct using left and right applications of permutations, - // the permutations need to be transposed between the two cases, which seems to be an - // inconsistency in Eigen! 
- Matrix A = ABC.topLeftCorner(6,6); - A = A.selfadjointView(); - Eigen::LDLT ldlt; - ldlt.compute(A); - Matrix R = ldlt.vectorD().cwiseSqrt().asDiagonal() * Matrix(ldlt.matrixU()); - Rp = R * ldlt.transpositionsP(); - Matrix actual1 = Matrix::Identity(6,6); - actual1 = Matrix(actual1 * ldlt.transpositionsP()); - actual1 *= ldlt.matrixL(); - actual1 *= ldlt.vectorD().asDiagonal(); - actual1 *= ldlt.matrixU(); - actual1 = Matrix(actual1 * ldlt.transpositionsP().transpose()); - Matrix actual2 = Matrix::Identity(6,6); - actual2 = Matrix(ldlt.transpositionsP() * actual2); - actual2 = ldlt.matrixU() * actual2; - actual2 = ldlt.vectorD().asDiagonal() * actual2; - actual2 = ldlt.matrixL() * actual2; - actual2 = Matrix(ldlt.transpositionsP().transpose() * actual2); - Matrix actual3 = ldlt.reconstructedMatrix(); - EXPECT(assert_equal(expected, actual1)); - EXPECT(assert_equal(expected, actual2)); - EXPECT(assert_equal(expected, actual3)); - - // Again checking the difference between left and right permutation application - EXPECT(assert_equal(Matrix(eye(6) * ldlt.transpositionsP().transpose()), Matrix(ldlt.transpositionsP() * eye(6)))); - - // Same but with a manually-constructed permutation - Matrix I = eye(3); - Eigen::Transpositions p(3); - p.indices()[0] = 1; - p.indices()[1] = 1; - p.indices()[2] = 0; - Matrix IexpectedR = (Matrix(3,3) << - 0, 1, 0, - 0, 0, 1, - 1, 0, 0).finished(); - Matrix IexpectedL = (Matrix(3,3) << - 0, 0, 1, - 1, 0, 0, - 0, 1, 0).finished(); - EXPECT(assert_equal(IexpectedR, I*p)); - EXPECT(assert_equal(IexpectedL, p*I)); - EXPECT(assert_equal(IexpectedR, p.transpose()*I)); -} - /* ************************************************************************* */ TEST(cholesky, BadScalingCholesky) { Matrix A = Matrix_(2,2, @@ -175,24 +68,6 @@ TEST(cholesky, BadScalingCholesky) { DOUBLES_EQUAL(expectedSqrtCondition, actualSqrtCondition, 1e-41); } -/* ************************************************************************* */ -TEST(cholesky, BadScalingLDL) { - Matrix A = Matrix_(2,2, - 1.0, 0.0, - 0.0, 1e-40); - - Matrix R(A.transpose() * A); - Eigen::LDLT::TranspositionType permutation = ldlPartial(R, 2); - - EXPECT(permutation.indices()(0) == 0); - EXPECT(permutation.indices()(1) == 1); - - double expectedCondition = 1e40; - double actualCondition = R(0,0) / R(1,1); - - DOUBLES_EQUAL(expectedCondition, actualCondition, 1e-41); -} - /* ************************************************************************* */ TEST(cholesky, BadScalingSVD) { Matrix A = Matrix_(2,2, diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h index 1ef9081d4..8c6e72357 100644 --- a/gtsam/linear/GaussianBayesNet.h +++ b/gtsam/linear/GaussianBayesNet.h @@ -116,8 +116,6 @@ namespace gtsam { /** * Return (dense) upper-triangular matrix representation - * NOTE: if this is the result of elimination with LDL, the matrix will - * not necessarily be upper triangular due to column permutations */ std::pair matrix(const GaussianBayesNet&); diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index 2fca36b6a..de459a00f 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -124,7 +124,6 @@ GaussianConditional& GaussianConditional::operator=(const GaussianConditional& r this->Base::operator=(rhs); // Copy keys rsd_.assignNoalias(rhs.rsd_); // Copy matrix and block configuration sigmas_ = rhs.sigmas_; // Copy sigmas - permutation_ = rhs.permutation_; // Copy permutation } return *this; } @@ -143,7 +142,6 @@ void 
GaussianConditional::print(const string &s) const } gtsam::print(Vector(get_d()),"d"); gtsam::print(sigmas_,"sigmas"); - cout << "Permutation: " << permutation_.indices().transpose() << endl; } /* ************************************************************************* */ @@ -196,8 +194,7 @@ inline static void doSolveInPlace(const GaussianConditional& conditional, VALUES if(debug) conditional.print("Solving conditional in place"); Vector xS = internal::extractVectorValuesSlices(x, conditional.beginParents(), conditional.endParents()); xS = conditional.get_d() - conditional.get_S() * xS; - Vector soln = conditional.permutation().transpose() * - conditional.get_R().triangularView().solve(xS); + Vector soln = conditional.get_R().triangularView().solve(xS); if(debug) { gtsam::print(Matrix(conditional.get_R()), "Calling backSubstituteUpper on "); gtsam::print(soln, "full back-substitution solution: "); @@ -219,7 +216,7 @@ void GaussianConditional::solveInPlace(Permuted& x) const { void GaussianConditional::solveTransposeInPlace(VectorValues& gy) const { Vector frontalVec = internal::extractVectorValuesSlices(gy, beginFrontals(), endFrontals()); // TODO: verify permutation - frontalVec = permutation_ * gtsam::backSubstituteUpper(frontalVec,Matrix(get_R())); + frontalVec = gtsam::backSubstituteUpper(frontalVec,Matrix(get_R())); GaussianConditional::const_iterator it; for (it = beginParents(); it!= endParents(); it++) { const Index i = *it; diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index b33098061..2f91a6115 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -60,17 +60,10 @@ public: typedef rsd_type::Column d_type; typedef rsd_type::constColumn const_d_type; - typedef Eigen::LDLT::TranspositionType TranspositionType; - protected: /** Store the conditional as one big upper-triangular wide matrix, arranged * as \f$ [ R S1 S2 ... d ] \f$. Access these blocks using a VerticalBlockView. - * - * WARNING!!! When using with LDL, R is the permuted upper triangular matrix. - * Its columns/rows do not correspond to the correct components of the variables. 
- * Use R*permutation_ to get back the correct non-permuted order, - * for example when converting to the Jacobian * */ RdMatrix matrix_; rsd_type rsd_; @@ -78,10 +71,6 @@ protected: /** vector of standard deviations */ Vector sigmas_; - /** Store the permutation matrix, used by LDL' in the pivoting process - * This is used to get back the correct ordering of x after solving by backSubstitition */ - TranspositionType permutation_; - /** typedef to base class */ typedef IndexConditional Base; @@ -131,8 +120,7 @@ public: */ template GaussianConditional(ITERATOR firstKey, ITERATOR lastKey, size_t nrFrontals, - const VerticalBlockView& matrices, const Vector& sigmas, - const TranspositionType& permutation = TranspositionType()); + const VerticalBlockView& matrices, const Vector& sigmas); /** Copy constructor */ GaussianConditional(const GaussianConditional& rhs); @@ -160,8 +148,7 @@ public: /** Compute the information matrix */ Matrix computeInformation() const { - Matrix R = get_R() * permutation_.transpose(); - return R.transpose() * R; + return get_R().transpose() * get_R(); } /** Return a view of the upper-triangular R block of the conditional */ @@ -180,11 +167,6 @@ public: /** Get the Vector of sigmas */ const Vector& get_sigmas() const {return sigmas_;} - /** Return the permutation of R made by LDL, to recover R in the correct - * order use R*permutation, see the GaussianConditional main class comment - */ - const TranspositionType& permutation() const { return permutation_; } - protected: const RdMatrix& matrix() const { return matrix_; } @@ -269,9 +251,9 @@ private: template GaussianConditional::GaussianConditional(ITERATOR firstKey, ITERATOR lastKey, size_t nrFrontals, const VerticalBlockView& matrices, - const Vector& sigmas, const GaussianConditional::TranspositionType& permutation) : + const Vector& sigmas) : IndexConditional(std::vector(firstKey, lastKey), nrFrontals), rsd_( - matrix_), sigmas_(sigmas), permutation_(permutation) { + matrix_), sigmas_(sigmas) { rsd_.assignNoalias(matrices); } diff --git a/gtsam/linear/GaussianDensity.cpp b/gtsam/linear/GaussianDensity.cpp index 5300d594f..97e42d9ba 100644 --- a/gtsam/linear/GaussianDensity.cpp +++ b/gtsam/linear/GaussianDensity.cpp @@ -32,7 +32,6 @@ namespace gtsam { gtsam::print(Matrix(get_R()),"R"); gtsam::print(Vector(get_d()),"d"); gtsam::print(sigmas_,"sigmas"); - cout << "Permutation: " << permutation_.indices().transpose() << endl; } /* ************************************************************************* */ diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp index 0241bc3b3..853cf26ba 100644 --- a/gtsam/linear/GaussianFactorGraph.cpp +++ b/gtsam/linear/GaussianFactorGraph.cpp @@ -557,145 +557,4 @@ break; } // \EliminatePreferCholesky - /* ************************************************************************* */ - GaussianFactorGraph::EliminationResult EliminateLDL( - const FactorGraph& factors, size_t nrFrontals) { - const bool debug = ISDEBUG("EliminateLDL"); - - // Find the scatter and variable dimensions - tic(1, "find scatter"); - Scatter scatter(findScatterAndDims(factors)); - toc(1, "find scatter"); - - // Pull out keys and dimensions - tic(2, "keys"); - vector dimensions(scatter.size() + 1); - BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) { - dimensions[var_slot.second.slot] = var_slot.second.dimension; - } - // This is for the r.h.s. 
vector - dimensions.back() = 1; - toc(2, "keys"); - - // Form Ab' * Ab - tic(3, "combine"); - - if (debug) { - // print out everything before combine - factors.print("Factors to be combined into hessian"); - cout << "Dimensions (" << dimensions.size() << "): "; - BOOST_FOREACH(size_t d, dimensions) cout << d << " "; - cout << "\nScatter:" << endl; - BOOST_FOREACH(const Scatter::value_type& p, scatter) - cout << " Index: " << p.first << ", " << p.second.toString() << endl; - } - - HessianFactor::shared_ptr // - combinedFactor(new HessianFactor(factors, dimensions, scatter)); - toc(3, "combine"); - - // Do LDL, note that after this, the lower triangle still contains - // some untouched non-zeros that should be zero. We zero them while - // extracting submatrices next. - tic(4, "partial LDL"); - Eigen::LDLT::TranspositionType permutation = combinedFactor->partialLDL(nrFrontals); - toc(4, "partial LDL"); - - // Extract conditional and fill in details of the remaining factor - tic(5, "split"); - GaussianConditional::shared_ptr conditional = - combinedFactor->splitEliminatedFactor(nrFrontals, permutation); - if (debug) { - conditional->print("Extracted conditional: "); - combinedFactor->print("Eliminated factor (L piece): "); - } - toc(5, "split"); - - combinedFactor->assertInvariants(); - return make_pair(conditional, combinedFactor); - } // \EliminateLDL - - /* ************************************************************************* */ - GaussianFactorGraph::EliminationResult EliminatePreferLDL( - const FactorGraph& factors, size_t nrFrontals) { - - typedef JacobianFactor J; - typedef HessianFactor H; - - // If any JacobianFactors have constrained noise models, we have to convert - // all factors to JacobianFactors. Otherwise, we can convert all factors - // to HessianFactors. This is because QR can handle constrained noise - // models but LDL cannot. - - // Decide whether to use QR or LDL - // Check if any JacobianFactors have constrained noise models. - if (hasConstraints(factors)) - return EliminateQR(factors, nrFrontals); - else { - GaussianFactorGraph::EliminationResult ret; -#ifdef NDEBUG - static const bool diag = false; -#else - static const bool diag = !ISDEBUG("NoLDLDiagnostics"); -#endif - if(!diag) { - tic(2, "EliminateLDL"); - ret = EliminateLDL(factors, nrFrontals); - toc(2, "EliminateLDL"); - } else { - try { - tic(2, "EliminateLDL"); - ret = EliminateLDL(factors, nrFrontals); - toc(2, "EliminateLDL"); - } catch (const NegativeMatrixException& e) { - throw; - } catch (const exception& e) { - cout << "Exception in EliminateLDL: " << e.what() << endl; - SETDEBUG("EliminateLDL", true); - SETDEBUG("updateATA", true); - SETDEBUG("JacobianFactor::eliminate", true); - SETDEBUG("JacobianFactor::Combine", true); - SETDEBUG("ldlPartial", true); - SETDEBUG("findScatterAndDims", true); - factors.print("Combining factors: "); - EliminateLDL(factors, nrFrontals); - throw; - } - } - - const bool checkLDL = ISDEBUG("EliminateGaussian Check LDL"); - if (checkLDL) { - GaussianFactorGraph::EliminationResult expected; - FactorGraph jacobians = convertToJacobians(factors); - try { - // Compare with QR - expected = EliminateJacobians(jacobians, nrFrontals); - } catch (...) 
{ - cout << "Exception in QR" << endl; - throw; - } - - H actual_factor(*ret.second); - H expected_factor(*expected.second); - if (!assert_equal(*expected.first, *ret.first, 100.0) || !assert_equal( - expected_factor, actual_factor, 1.0)) { - cout << "LDL and QR do not agree" << endl; - - SETDEBUG("EliminateLDL", true); - SETDEBUG("updateATA", true); - SETDEBUG("JacobianFactor::eliminate", true); - SETDEBUG("JacobianFactor::Combine", true); - jacobians.print("Jacobian Factors: "); - EliminateJacobians(jacobians, nrFrontals); - EliminateLDL(factors, nrFrontals); - factors.print("Combining factors: "); - - throw runtime_error("LDL and QR do not agree"); - } - } - - return ret; - } - } // \EliminatePreferLDL - } // namespace gtsam diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h index 822c8f62a..1f66b5b4c 100644 --- a/gtsam/linear/GaussianFactorGraph.h +++ b/gtsam/linear/GaussianFactorGraph.h @@ -286,47 +286,4 @@ namespace gtsam { GaussianFactorGraph::EliminationResult EliminateCholesky(const FactorGraph< GaussianFactor>& factors, size_t nrFrontals = 1); - /** - * Densely partially eliminate with LDL factorization. JacobianFactors - * are left-multiplied with their transpose to form the Hessian using the - * conversion constructor HessianFactor(const JacobianFactor&). - * - * If any factors contain constrained noise models (any sigmas equal to - * zero), QR factorization will be performed instead, because our current - * implementation cannot handle constrained noise models in LDL - * factorization. EliminateLDL(), on the other hand, will fail if any - * factors contain constrained noise models. - * - * Variables are eliminated in the natural order of the variable indices of in - * the factors. - * @param factors Factors to combine and eliminate - * @param nrFrontals Number of frontal variables to eliminate. - * @return The conditional and remaining factor - - * \ingroup LinearSolving - */ - GaussianFactorGraph::EliminationResult EliminatePreferLDL(const FactorGraph< - GaussianFactor>& factors, size_t nrFrontals = 1); - - /** - * Densely partially eliminate with LDL factorization. JacobianFactors - * are left-multiplied with their transpose to form the Hessian using the - * conversion constructor HessianFactor(const JacobianFactor&). - * - * If any factors contain constrained noise models, this function will fail - * because our current implementation cannot handle constrained noise models - * in LDL factorization. The function EliminatePreferLDL() - * automatically does QR instead when this is the case. - * - * Variables are eliminated in the natural order of the variable indices of in - * the factors. - * @param factors Factors to combine and eliminate - * @param nrFrontals Number of frontal variables to eliminate. 
- * @return The conditional and remaining factor - - * \ingroup LinearSolving - */ - GaussianFactorGraph::EliminationResult EliminateLDL(const FactorGraph< - GaussianFactor>& factors, size_t nrFrontals = 1); - } // namespace gtsam diff --git a/gtsam/linear/GaussianMultifrontalSolver.cpp b/gtsam/linear/GaussianMultifrontalSolver.cpp index d7e0d1f91..6404ebaef 100644 --- a/gtsam/linear/GaussianMultifrontalSolver.cpp +++ b/gtsam/linear/GaussianMultifrontalSolver.cpp @@ -46,7 +46,7 @@ GaussianBayesTree::shared_ptr GaussianMultifrontalSolver::eliminate() const { if (useQR_) return Base::eliminate(&EliminateQR); else - return Base::eliminate(&EliminatePreferLDL); + return Base::eliminate(&EliminatePreferCholesky); } /* ************************************************************************* */ @@ -56,7 +56,7 @@ VectorValues::shared_ptr GaussianMultifrontalSolver::optimize() const { if (useQR_) values = VectorValues::shared_ptr(new VectorValues(junctionTree_->optimize(&EliminateQR))); else - values= VectorValues::shared_ptr(new VectorValues(junctionTree_->optimize(&EliminatePreferLDL))); + values= VectorValues::shared_ptr(new VectorValues(junctionTree_->optimize(&EliminatePreferCholesky))); toc(2,"optimize"); return values; } @@ -66,7 +66,7 @@ GaussianFactorGraph::shared_ptr GaussianMultifrontalSolver::jointFactorGraph(con if (useQR_) return GaussianFactorGraph::shared_ptr(new GaussianFactorGraph(*Base::jointFactorGraph(js,&EliminateQR))); else - return GaussianFactorGraph::shared_ptr(new GaussianFactorGraph(*Base::jointFactorGraph(js,&EliminatePreferLDL))); + return GaussianFactorGraph::shared_ptr(new GaussianFactorGraph(*Base::jointFactorGraph(js,&EliminatePreferCholesky))); } /* ************************************************************************* */ @@ -74,7 +74,7 @@ GaussianFactor::shared_ptr GaussianMultifrontalSolver::marginalFactor(Index j) c if (useQR_) return Base::marginalFactor(j,&EliminateQR); else - return Base::marginalFactor(j,&EliminatePreferLDL); + return Base::marginalFactor(j,&EliminatePreferCholesky); } /* ************************************************************************* */ @@ -85,8 +85,8 @@ Matrix GaussianMultifrontalSolver::marginalCovariance(Index j) const { fg.push_back(Base::marginalFactor(j,&EliminateQR)); conditional = EliminateQR(fg,1).first; } else { - fg.push_back(Base::marginalFactor(j,&EliminatePreferLDL)); - conditional = EliminatePreferLDL(fg,1).first; + fg.push_back(Base::marginalFactor(j,&EliminatePreferCholesky)); + conditional = EliminatePreferCholesky(fg,1).first; } return conditional->computeInformation().inverse(); } diff --git a/gtsam/linear/GaussianMultifrontalSolver.h b/gtsam/linear/GaussianMultifrontalSolver.h index 8177bed0b..6adde1aa5 100644 --- a/gtsam/linear/GaussianMultifrontalSolver.h +++ b/gtsam/linear/GaussianMultifrontalSolver.h @@ -48,7 +48,7 @@ protected: typedef GenericMultifrontalSolver Base; typedef boost::shared_ptr shared_ptr; - /** flag to determine whether to use LDL or QR */ + /** flag to determine whether to use Cholesky or QR */ bool useQR_; public: diff --git a/gtsam/linear/GaussianSequentialSolver.cpp b/gtsam/linear/GaussianSequentialSolver.cpp index 08b2461e8..b83926647 100644 --- a/gtsam/linear/GaussianSequentialSolver.cpp +++ b/gtsam/linear/GaussianSequentialSolver.cpp @@ -53,7 +53,7 @@ GaussianBayesNet::shared_ptr GaussianSequentialSolver::eliminate() const { if (useQR_) return Base::eliminate(&EliminateQR); else - return Base::eliminate(&EliminatePreferLDL); + return Base::eliminate(&EliminatePreferCholesky); } 
/* ************************************************************************* */ @@ -90,7 +90,7 @@ GaussianFactor::shared_ptr GaussianSequentialSolver::marginalFactor(Index j) con if (useQR_) return Base::marginalFactor(j,&EliminateQR); else - return Base::marginalFactor(j,&EliminatePreferLDL); + return Base::marginalFactor(j,&EliminatePreferCholesky); } /* ************************************************************************* */ @@ -101,8 +101,8 @@ Matrix GaussianSequentialSolver::marginalCovariance(Index j) const { fg.push_back(Base::marginalFactor(j, &EliminateQR)); conditional = EliminateQR(fg, 1).first; } else { - fg.push_back(Base::marginalFactor(j, &EliminatePreferLDL)); - conditional = EliminatePreferLDL(fg, 1).first; + fg.push_back(Base::marginalFactor(j, &EliminatePreferCholesky)); + conditional = EliminatePreferCholesky(fg, 1).first; } return conditional->computeInformation().inverse(); } @@ -115,7 +115,7 @@ GaussianSequentialSolver::jointFactorGraph(const std::vector& js) const { *Base::jointFactorGraph(js, &EliminateQR))); else return GaussianFactorGraph::shared_ptr(new GaussianFactorGraph( - *Base::jointFactorGraph(js, &EliminatePreferLDL))); + *Base::jointFactorGraph(js, &EliminatePreferCholesky))); } } /// namespace gtsam diff --git a/gtsam/linear/GaussianSequentialSolver.h b/gtsam/linear/GaussianSequentialSolver.h index cb0b76ec8..2fad97867 100644 --- a/gtsam/linear/GaussianSequentialSolver.h +++ b/gtsam/linear/GaussianSequentialSolver.h @@ -54,7 +54,7 @@ protected: typedef GenericSequentialSolver Base; typedef boost::shared_ptr shared_ptr; - /** flag to determine whether to use LDL or QR */ + /** flag to determine whether to use Cholesky or QR */ bool useQR_; public: diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp index 8402e4f91..b1f829005 100644 --- a/gtsam/linear/HessianFactor.cpp +++ b/gtsam/linear/HessianFactor.cpp @@ -251,7 +251,7 @@ HessianFactor::HessianFactor(const FactorGraph& factors, const vector& dimensions, const Scatter& scatter) : info_(matrix_) { - const bool debug = ISDEBUG("EliminateCholesky") || ISDEBUG("EliminateLDL"); + const bool debug = ISDEBUG("EliminateCholesky"); // Form Ab' * Ab tic(1, "allocate"); info_.resize(dimensions.begin(), dimensions.end(), false); @@ -472,19 +472,13 @@ void HessianFactor::partialCholesky(size_t nrFrontals) { } /* ************************************************************************* */ -Eigen::LDLT::TranspositionType HessianFactor::partialLDL(size_t nrFrontals) { - return ldlPartial(matrix_, info_.offset(nrFrontals)); -} - -/* ************************************************************************* */ -GaussianConditional::shared_ptr -HessianFactor::splitEliminatedFactor(size_t nrFrontals, const Eigen::LDLT::TranspositionType& permutation) { +GaussianConditional::shared_ptr HessianFactor::splitEliminatedFactor(size_t nrFrontals) { static const bool debug = false; // Extract conditionals tic(1, "extract conditionals"); - GaussianConditional::shared_ptr conditionals(new GaussianConditional()); + GaussianConditional::shared_ptr conditional(new GaussianConditional()); typedef VerticalBlockView BlockAb; BlockAb Ab(matrix_, info_); @@ -492,12 +486,11 @@ HessianFactor::splitEliminatedFactor(size_t nrFrontals, const Eigen::LDLT(keys_.begin(), keys_.end(), nrFrontals, Ab, sigmas, permutation); + conditional = boost::make_shared(keys_.begin(), keys_.end(), nrFrontals, Ab, sigmas); toc(2, "construct cond"); - if(debug) conditionals->print("Extracted conditional: "); + if(debug) 
conditional->print("Extracted conditional: "); toc(1, "extract conditionals"); @@ -510,7 +503,7 @@ HessianFactor::splitEliminatedFactor(size_t nrFrontals, const Eigen::LDLT::TranspositionType partialLDL(size_t nrFrontals); - /** split partially eliminated factor */ - boost::shared_ptr splitEliminatedFactor( - size_t nrFrontals, const Eigen::LDLT::TranspositionType& permutation = Eigen::LDLT::TranspositionType()); + boost::shared_ptr splitEliminatedFactor(size_t nrFrontals); /** assert invariants */ void assertInvariants() const; diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index 688e23fe4..e2facec40 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -152,7 +152,6 @@ namespace gtsam { /* ************************************************************************* */ JacobianFactor::JacobianFactor(const GaussianConditional& cg) : GaussianFactor(cg), model_(noiseModel::Diagonal::Sigmas(cg.get_sigmas(), true)), Ab_(matrix_) { Ab_.assignNoalias(cg.rsd_); - Ab_.range(0,cg.nrFrontals()) = Ab_.range(0,cg.nrFrontals()) * cg.permutation_.transpose(); firstNonzeroBlocks_.resize(cg.get_d().size(), 0); // set sigmas from precisions assertInvariants(); } diff --git a/gtsam/linear/KalmanFilter.cpp b/gtsam/linear/KalmanFilter.cpp index 56a2692f3..0df9d99ce 100644 --- a/gtsam/linear/KalmanFilter.cpp +++ b/gtsam/linear/KalmanFilter.cpp @@ -13,7 +13,7 @@ * @file KalmanFilter.cpp * * @brief Simple linear Kalman filter. - * Implemented using factor graphs, i.e., does LDL-based SRIF, really. + * Implemented using factor graphs, i.e., does Cholesky-based SRIF, really. * * @date Sep 3, 2011 * @author Stephen Williams diff --git a/gtsam/linear/KalmanFilter.h b/gtsam/linear/KalmanFilter.h index bff512b95..b74a0db7d 100644 --- a/gtsam/linear/KalmanFilter.h +++ b/gtsam/linear/KalmanFilter.h @@ -13,7 +13,7 @@ * @file testKalmanFilter.cpp * * Simple linear Kalman filter. - * Implemented using factor graphs, i.e., does LDL-based SRIF, really. + * Implemented using factor graphs, i.e., does Cholesky-based SRIF, really. * * @date Sep 3, 2011 * @author Stephen Williams @@ -49,7 +49,7 @@ namespace gtsam { * The type below allows you to specify the factorization variant. 
*/ enum Factorization { - QR, LDL + QR, CHOLESKY }; /** diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp index b19a1fda5..93030b598 100644 --- a/gtsam/linear/tests/testGaussianConditional.cpp +++ b/gtsam/linear/tests/testGaussianConditional.cpp @@ -191,7 +191,6 @@ TEST( GaussianConditional, solve ) /* ************************************************************************* */ TEST( GaussianConditional, solve_simple ) { - // no pivoting from LDL, so R matrix is not permuted Matrix full_matrix = Matrix_(4, 7, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1, 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2, @@ -231,7 +230,6 @@ TEST( GaussianConditional, solve_simple ) TEST( GaussianConditional, solve_multifrontal ) { // create full system, 3 variables, 2 frontals, all 2 dim - // no pivoting from LDL, so R matrix is not permuted Matrix full_matrix = Matrix_(4, 7, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1, 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2, @@ -273,7 +271,6 @@ TEST( GaussianConditional, solve_multifrontal ) TEST( GaussianConditional, solve_multifrontal_permuted ) { // create full system, 3 variables, 2 frontals, all 2 dim - // no pivoting from LDL, so R matrix is not permuted Matrix full_matrix = Matrix_(4, 7, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1, 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2, @@ -367,30 +364,11 @@ TEST( GaussianConditional, computeInformation ) { 0, 0, 8, 9, 0, 0, 0, 10).finished(); - // Shuffle columns - Eigen::Transpositions p(4); - p.indices()[0] = 1; - p.indices()[1] = 1; - p.indices()[2] = 2; - p.indices()[3] = 0; - - // The expected result of permuting R - Matrix RpExpected = (Matrix(4,4) << - 2, 4, 3, 1, - 5, 7, 6, 0, - 0, 9, 8, 0, - 0, 10,0, 0).finished(); - - // Check that the permutation does what we expect - Matrix RpActual = R * p.transpose(); - EXPECT(assert_equal(RpExpected, RpActual)); - - // Create conditional with the permutation + // Create conditional GaussianConditional conditional(0, Vector::Zero(4), R, Vector::Constant(4, 1.0)); - conditional.permutation_ = p; // Expected information matrix (using permuted R) - Matrix IExpected = RpExpected.transpose() * RpExpected; + Matrix IExpected = R.transpose() * R; // Actual information matrix (conditional should permute R) Matrix IActual = conditional.computeInformation(); @@ -407,23 +385,8 @@ TEST( GaussianConditional, isGaussianFactor ) { 0, 0, 8, 9, 0, 0, 0, 10).finished(); - // Shuffle columns - Eigen::Transpositions p(4); - p.indices()[0] = 1; - p.indices()[1] = 1; - p.indices()[2] = 2; - p.indices()[3] = 0; - - // The expected result of the permutation - Matrix RpExpected = (Matrix(4,4) << - 4, 1, 3, 2, - 7, 0, 6, 5, - 9, 0, 8, 0, - 10,0, 0, 0).finished(); - - // Create a conditional with this permutation + // Create a conditional GaussianConditional conditional(0, Vector::Zero(4), R, Vector::Constant(4, 1.0)); - conditional.permutation_ = p; // Expected information matrix computed by conditional Matrix IExpected = conditional.computeInformation(); diff --git a/gtsam/linear/tests/testGaussianJunctionTree.cpp b/gtsam/linear/tests/testGaussianJunctionTree.cpp index 3a9bcb4ad..a10f80a4d 100644 --- a/gtsam/linear/tests/testGaussianJunctionTree.cpp +++ b/gtsam/linear/tests/testGaussianJunctionTree.cpp @@ -104,7 +104,7 @@ TEST( GaussianJunctionTree, eliminate ) } /* ************************************************************************* */ -TEST_UNSAFE( GaussianJunctionTree, GBNConstructor ) +TEST( GaussianJunctionTree, GBNConstructor ) { GaussianFactorGraph fg = createChain(); GaussianJunctionTree 
jt(fg); @@ -167,6 +167,25 @@ TEST(GaussianJunctionTree, complicatedMarginal) { 0.7943, 0.1656, 0.3112, 0.6020).finished()), 2, (Vector(3) << 0.9619, 0.0046, 0.7749).finished(), ones(3))); +// GaussianConditional::shared_ptr R_5_6(new GaussianConditional( +// pair_list_of +// (5, (Matrix(3,1) << +// 0.2435, +// 0, +// 0).finished()) +// (6, (Matrix(3,2) << +// 0.4733, 0.1966, +// 0.9022, 0.0979, +// 0.0, 0.2312).finished()) // Attempted to recreate without permutation +// (7, (Matrix(3,1) << +// 0.5853, +// 1.0589, +// 0.1487).finished()) +// (8, (Matrix(3,2) << +// 0.2858, 0.3804, +// 0.9893, 0.2912, +// 0.4035, 0.4933).finished()), +// 2, (Vector(3) << 0.8173, 0.4164, 0.7671).finished(), ones(3))); GaussianConditional::shared_ptr R_5_6(new GaussianConditional( pair_list_of (5, (Matrix(3,1) << @@ -174,9 +193,13 @@ TEST(GaussianJunctionTree, complicatedMarginal) { 0, 0).finished()) (6, (Matrix(3,2) << - 0.1966, 0.4733, - 0.2511, 0.3517, - 0, 0.8308).finished()) + 0.4733, 0.1966, + 0.3517, 0.2511, + 0.8308, 0.0).finished()) // NOTE the non-upper-triangular form + // here since this test was written when we had column permutations + // from LDL. The code still works currently (does not enfore + // upper-triangularity in this case) but this test will need to be + // redone if this stops working in the future (7, (Matrix(3,1) << 0.5853, 0.5497, @@ -186,9 +209,6 @@ TEST(GaussianJunctionTree, complicatedMarginal) { 0.7572, 0.5678, 0.7537, 0.0759).finished()), 2, (Vector(3) << 0.8173, 0.8687, 0.0844).finished(), ones(3))); - R_5_6->permutation_ = Eigen::Transpositions(2); - R_5_6->permutation_.indices()(0) = 0; - R_5_6->permutation_.indices()(1) = 2; GaussianConditional::shared_ptr R_7_8(new GaussianConditional( pair_list_of (7, (Matrix(3,1) << @@ -251,15 +271,15 @@ TEST(GaussianJunctionTree, complicatedMarginal) { // Marginal on 5 Matrix expectedCov = (Matrix(1,1) << 236.5166).finished(); - JacobianFactor::shared_ptr actualJacobianLDL = boost::dynamic_pointer_cast( - bt.marginalFactor(5, EliminateLDL)); + JacobianFactor::shared_ptr actualJacobianChol= boost::dynamic_pointer_cast( + bt.marginalFactor(5, EliminateCholesky)); JacobianFactor::shared_ptr actualJacobianQR = boost::dynamic_pointer_cast( bt.marginalFactor(5, EliminateQR)); - CHECK(assert_equal(*actualJacobianLDL, *actualJacobianQR)); // Check that LDL and QR obtained marginals are the same - LONGS_EQUAL(1, actualJacobianLDL->rows()); - LONGS_EQUAL(1, actualJacobianLDL->size()); - LONGS_EQUAL(5, actualJacobianLDL->keys()[0]); - Matrix actualA = actualJacobianLDL->getA(actualJacobianLDL->begin()); + CHECK(assert_equal(*actualJacobianChol, *actualJacobianQR)); // Check that Chol and QR obtained marginals are the same + LONGS_EQUAL(1, actualJacobianChol->rows()); + LONGS_EQUAL(1, actualJacobianChol->size()); + LONGS_EQUAL(5, actualJacobianChol->keys()[0]); + Matrix actualA = actualJacobianChol->getA(actualJacobianChol->begin()); Matrix actualCov = inverse(actualA.transpose() * actualA); EXPECT(assert_equal(expectedCov, actualCov, 1e-1)); @@ -270,15 +290,15 @@ TEST(GaussianJunctionTree, complicatedMarginal) { expectedCov = (Matrix(2,2) << 1015.8, 2886.2, 2886.2, 8471.2).finished(); - actualJacobianLDL = boost::dynamic_pointer_cast( - bt.marginalFactor(6, EliminateLDL)); + actualJacobianChol = boost::dynamic_pointer_cast( + bt.marginalFactor(6, EliminateCholesky)); actualJacobianQR = boost::dynamic_pointer_cast( bt.marginalFactor(6, EliminateQR)); - CHECK(assert_equal(*actualJacobianLDL, *actualJacobianQR)); // Check that LDL and QR obtained 
marginals are the same - LONGS_EQUAL(2, actualJacobianLDL->rows()); - LONGS_EQUAL(1, actualJacobianLDL->size()); - LONGS_EQUAL(6, actualJacobianLDL->keys()[0]); - actualA = actualJacobianLDL->getA(actualJacobianLDL->begin()); + CHECK(assert_equal(*actualJacobianChol, *actualJacobianQR)); // Check that Chol and QR obtained marginals are the same + LONGS_EQUAL(2, actualJacobianChol->rows()); + LONGS_EQUAL(1, actualJacobianChol->size()); + LONGS_EQUAL(6, actualJacobianChol->keys()[0]); + actualA = actualJacobianChol->getA(actualJacobianChol->begin()); actualCov = inverse(actualA.transpose() * actualA); EXPECT(assert_equal(expectedCov, actualCov, 1e1)); diff --git a/gtsam/linear/tests/testHessianFactor.cpp b/gtsam/linear/tests/testHessianFactor.cpp index c2ccdb4a7..59873862a 100644 --- a/gtsam/linear/tests/testHessianFactor.cpp +++ b/gtsam/linear/tests/testHessianFactor.cpp @@ -414,7 +414,7 @@ TEST_UNSAFE(HessianFactor, CombineAndEliminate) // create expected Hessian after elimination HessianFactor expectedCholeskyFactor(expectedFactor); - GaussianFactorGraph::EliminationResult actualCholesky = EliminateLDL( + GaussianFactorGraph::EliminationResult actualCholesky = EliminateCholesky( *gfg.convertCastFactors > (), 1); HessianFactor::shared_ptr actualFactor = boost::dynamic_pointer_cast< HessianFactor>(actualCholesky.second); @@ -465,7 +465,7 @@ TEST(HessianFactor, eliminate2 ) FactorGraph combinedLFG_Chol; combinedLFG_Chol.push_back(combinedLF_Chol); - GaussianFactorGraph::EliminationResult actual_Chol = EliminateLDL( + GaussianFactorGraph::EliminationResult actual_Chol = EliminateCholesky( combinedLFG_Chol, 1); HessianFactor::shared_ptr actualFactor = boost::dynamic_pointer_cast< HessianFactor>(actual_Chol.second); @@ -576,16 +576,6 @@ TEST(HessianFactor, eliminateUnsorted) { EXPECT(assert_equal(*expected_bn, *actual_bn, 1e-10)); EXPECT(assert_equal(*expected_factor, *actual_factor, 1e-10)); - - // Test LDL - boost::tie(expected_bn, expected_factor) = - EliminatePreferLDL(sortedGraph, 1); - - boost::tie(actual_bn, actual_factor) = - EliminatePreferLDL(unsortedGraph, 1); - - EXPECT(assert_equal(*expected_bn, *actual_bn, 1e-10)); - EXPECT(assert_equal(*expected_factor, *actual_factor, 1e-10)); } /* ************************************************************************* */ diff --git a/gtsam/linear/tests/testKalmanFilter.cpp b/gtsam/linear/tests/testKalmanFilter.cpp index 3fb8a91c6..ca50c12e9 100644 --- a/gtsam/linear/tests/testKalmanFilter.cpp +++ b/gtsam/linear/tests/testKalmanFilter.cpp @@ -165,7 +165,7 @@ TEST( KalmanFilter, predict ) { } /* ************************************************************************* */ -// Test both QR and LDL versions in case of a realistic (AHRS) dynamics update +// Test both QR and Cholesky versions in case of a realistic (AHRS) dynamics update TEST( KalmanFilter, QRvsCholesky ) { Vector mean = ones(9); @@ -180,8 +180,8 @@ TEST( KalmanFilter, QRvsCholesky ) { 63.8, -0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 625.0, 0.0, -0.6, -0.1, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 625.0); - // Create two Kalman filter of dimension 9, one using QR the other LDL - KalmanFilter kfa(9, KalmanFilter::QR), kfb(9, KalmanFilter::LDL); + // Create two Kalman filter of dimension 9, one using QR the other Cholesky + KalmanFilter kfa(9, KalmanFilter::QR), kfb(9, KalmanFilter::CHOLESKY); // create corresponding initial states KalmanFilter::State p0a = kfa.init(mean, covariance); diff --git a/gtsam/nonlinear/ExtendedKalmanFilter.h b/gtsam/nonlinear/ExtendedKalmanFilter.h index 13a612f84..353439d98 
100644 --- a/gtsam/nonlinear/ExtendedKalmanFilter.h +++ b/gtsam/nonlinear/ExtendedKalmanFilter.h @@ -26,7 +26,7 @@ namespace gtsam { /** * This is a generic Extended Kalman Filter class implemented using nonlinear factors. GTSAM - * basically does SRIF with LDL to solve the filter problem, making this an efficient, numerically + * basically does SRIF with Cholesky to solve the filter problem, making this an efficient, numerically * stable Kalman Filter implementation. * * The Kalman Filter relies on two models: a motion model that predicts the next state using diff --git a/gtsam/nonlinear/ISAM2-impl.cpp b/gtsam/nonlinear/ISAM2-impl.cpp index ec3b4edfd..8fe117e34 100644 --- a/gtsam/nonlinear/ISAM2-impl.cpp +++ b/gtsam/nonlinear/ISAM2-impl.cpp @@ -246,7 +246,7 @@ ISAM2::Impl::PartialSolve(GaussianFactorGraph& factors, tic(7,"eliminate"); JunctionTree jt(factors, affectedFactorsIndex); if(!useQR) - result.bayesTree = jt.eliminate(EliminatePreferLDL); + result.bayesTree = jt.eliminate(EliminatePreferCholesky); else result.bayesTree = jt.eliminate(EliminateQR); toc(7,"eliminate"); @@ -338,7 +338,7 @@ void updateDoglegDeltas(const boost::shared_ptr& clique, std::vecto Vector gS = internal::extractVectorValuesSlices(grad, (*clique)->beginParents(), (*clique)->endParents()); // Compute R*g and S*g for this clique - Vector RSgProd = ((*clique)->get_R() * (*clique)->permutation().transpose()) * gR + (*clique)->get_S() * gS; + Vector RSgProd = (*clique)->get_R() * gR + (*clique)->get_S() * gS; // Write into RgProd vector internal::writeVectorValuesSlices(RSgProd, RgProd, (*clique)->beginFrontals(), (*clique)->endFrontals()); diff --git a/gtsam/nonlinear/ISAM2-impl.h b/gtsam/nonlinear/ISAM2-impl.h index e9046b3e3..34573f0c5 100644 --- a/gtsam/nonlinear/ISAM2-impl.h +++ b/gtsam/nonlinear/ISAM2-impl.h @@ -118,7 +118,7 @@ struct ISAM2::Impl { * problem. This is permuted during use and so is cleared when this function * returns in order to invalidate it. * \param keys The set of indices used in \c factors. - * \param useQR Whether to use QR (if true), or LDL (if false). + * \param useQR Whether to use QR (if true), or Cholesky (if false). * \return The eliminated BayesTree and the permutation to be applied to the * rest of the ISAM2 data. */ diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp index 2f7ac8aaf..bae2252d6 100644 --- a/gtsam/nonlinear/ISAM2.cpp +++ b/gtsam/nonlinear/ISAM2.cpp @@ -326,8 +326,8 @@ boost::shared_ptr > ISAM2::recalculate( tic(5,"eliminate"); JunctionTree jt(linearFactors_, variableIndex_); sharedClique newRoot; - if(params_.factorization == ISAM2Params::LDL) - newRoot = jt.eliminate(EliminatePreferLDL); + if(params_.factorization == ISAM2Params::CHOLESKY) + newRoot = jt.eliminate(EliminatePreferCholesky); else if(params_.factorization == ISAM2Params::QR) newRoot = jt.eliminate(EliminateQR); else assert(false); diff --git a/gtsam/nonlinear/ISAM2.h b/gtsam/nonlinear/ISAM2.h index 96de41f8f..30f0b14d2 100644 --- a/gtsam/nonlinear/ISAM2.h +++ b/gtsam/nonlinear/ISAM2.h @@ -107,14 +107,14 @@ struct ISAM2Params { bool evaluateNonlinearError; ///< Whether to evaluate the nonlinear error before and after the update, to return in ISAM2Result from update() - enum Factorization { LDL, QR }; - /** Specifies whether to use QR or LDL numerical factorization (default: LDL). 
- * LDL is faster but potentially numerically unstable for poorly-conditioned problems, which can occur when + enum Factorization { CHOLESKY, QR }; + /** Specifies whether to use QR or CHOLESKY numerical factorization (default: CHOLESKY). + * Cholesky is faster but potentially numerically unstable for poorly-conditioned problems, which can occur when * uncertainty is very low in some variables (or dimensions of variables) and very high in others. QR is - * slower but more numerically stable in poorly-conditioned problems. We suggest using the default of LDL + * slower but more numerically stable in poorly-conditioned problems. We suggest using the default of Cholesky * unless gtsam sometimes throws NegativeMatrixException when your problem's Hessian is actually positive * definite. For positive definite problems, numerical error accumulation can cause the problem to become - * numerically negative or indefinite as solving proceeds, especially when using LDL. + * numerically negative or indefinite as solving proceeds, especially when using Cholesky. */ Factorization factorization; @@ -136,7 +136,7 @@ struct ISAM2Params { int _relinearizeSkip = 10, ///< see ISAM2Params::relinearizeSkip bool _enableRelinearization = true, ///< see ISAM2Params::enableRelinearization bool _evaluateNonlinearError = false, ///< see ISAM2Params::evaluateNonlinearError - Factorization _factorization = ISAM2Params::LDL, ///< see ISAM2Params::factorization + Factorization _factorization = ISAM2Params::CHOLESKY, ///< see ISAM2Params::factorization bool _cacheLinearizedFactors = true, ///< see ISAM2Params::cacheLinearizedFactors const KeyFormatter& _keyFormatter = DefaultKeyFormatter ///< see ISAM2::Params::keyFormatter ) : optimizationParams(_optimizationParams), relinearizeThreshold(_relinearizeThreshold), @@ -258,7 +258,7 @@ struct ISAM2Clique : public BayesTreeCliqueBase ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty) SuccessiveLinearizationParams() : - elimination(MULTIFRONTAL), factorization(LDL) {} + elimination(MULTIFRONTAL), factorization(CHOLESKY) {} virtual ~SuccessiveLinearizationParams() {} @@ -57,8 +56,6 @@ public: if(factorization == CHOLESKY) std::cout << " factorization method: CHOLESKY\n"; - else if(factorization == LDL) - std::cout << " factorization method: LDL\n"; else if(factorization == QR) std::cout << " factorization method: QR\n"; else @@ -75,8 +72,6 @@ public: GaussianFactorGraph::Eliminate getEliminationFunction() const { if(factorization == SuccessiveLinearizationParams::CHOLESKY) return EliminatePreferCholesky; - else if(factorization == SuccessiveLinearizationParams::LDL) - return EliminatePreferLDL; else if(factorization == SuccessiveLinearizationParams::QR) return EliminateQR; else diff --git a/tests/testGaussianFactorGraphB.cpp b/tests/testGaussianFactorGraphB.cpp index 8d7d96ea4..2e68200f5 100644 --- a/tests/testGaussianFactorGraphB.cpp +++ b/tests/testGaussianFactorGraphB.cpp @@ -476,7 +476,7 @@ TEST( GaussianFactorGraph, getOrdering) //} /* ************************************************************************* */ -TEST( GaussianFactorGraph, optimize_LDL ) +TEST( GaussianFactorGraph, optimize_Cholesky ) { // create an ordering Ordering ord; ord += kx(2),kl(1),kx(1); diff --git a/tests/testGaussianJunctionTreeB.cpp b/tests/testGaussianJunctionTreeB.cpp index c6b935c21..074693123 100644 --- a/tests/testGaussianJunctionTreeB.cpp +++ b/tests/testGaussianJunctionTreeB.cpp @@ -219,8 +219,8 @@ TEST(GaussianJunctionTree, simpleMarginal) { //
Compute marginal directly from BayesTree GaussianBayesTree gbt; - gbt.insert(GaussianJunctionTree(gfg).eliminate(EliminateLDL)); - marginalFactor = gbt.marginalFactor(1, EliminateLDL); + gbt.insert(GaussianJunctionTree(gfg).eliminate(EliminateCholesky)); + marginalFactor = gbt.marginalFactor(1, EliminateCholesky); marginalJacobian = boost::dynamic_pointer_cast(marginalFactor); Matrix actual3 = inverse(marginalJacobian->getA(marginalJacobian->begin()).transpose() * marginalJacobian->getA(marginalJacobian->begin())); diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 80754ce02..d1eb01f27 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -151,10 +151,8 @@ TEST( NonlinearOptimizer, optimization_method ) { LevenbergMarquardtParams paramsQR; paramsQR.factorization = LevenbergMarquardtParams::QR; - LevenbergMarquardtParams paramsLDL; - paramsLDL.factorization = LevenbergMarquardtParams::LDL; LevenbergMarquardtParams paramsChol; - paramsLDL.factorization = LevenbergMarquardtParams::CHOLESKY; + paramsChol.factorization = LevenbergMarquardtParams::CHOLESKY; example::Graph fg = example::createReallyNonlinearFactorGraph(); @@ -165,9 +163,6 @@ TEST( NonlinearOptimizer, optimization_method ) Values actualMFQR = LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize(); DOUBLES_EQUAL(0,fg.error(actualMFQR),tol); - Values actualMFLDL = LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimize(); - DOUBLES_EQUAL(0,fg.error(actualMFLDL),tol); - Values actualMFChol = LevenbergMarquardtOptimizer(fg, c0, paramsChol).optimize(); DOUBLES_EQUAL(0,fg.error(actualMFChol),tol); }
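
For reference, the elimination step that replaces ldlPartial throughout this patch is plain partial Cholesky on a symmetric block matrix [A B; B' C]: factor A = R'R, form S = R^-T * B, and leave the Schur complement C - S'S on the separator. Below is a minimal standalone sketch of that computation, assuming Eigen 3; partialCholeskySketch is an illustrative name for this note only, not the GTSAM choleskyPartial implementation.

#include <Eigen/Dense>
#include <iostream>

// Sketch of partial Cholesky on a symmetric matrix [A B; B' C] stored in the
// upper triangle: overwrite A with R (where A = R'R), B with S = R^-T * B, and
// the upper triangle of C with the Schur complement C - S'S.
void partialCholeskySketch(Eigen::MatrixXd& ABC, int nFrontal) {
  const int n = ABC.rows();
  const int m = n - nFrontal;

  // Factor the frontal block: A = R' * R, with R upper triangular
  Eigen::LLT<Eigen::MatrixXd> llt(ABC.topLeftCorner(nFrontal, nFrontal));
  Eigen::MatrixXd R = llt.matrixU();
  ABC.topLeftCorner(nFrontal, nFrontal) = R;

  if (m > 0) {
    // S = R^-T * B, solved in place on the top-right block
    R.triangularView<Eigen::Upper>().transpose().solveInPlace(
        ABC.topRightCorner(nFrontal, m));
    // C -= S' * S; only the upper triangle is referenced and updated
    ABC.bottomRightCorner(m, m).selfadjointView<Eigen::Upper>().rankUpdate(
        ABC.topRightCorner(nFrontal, m).transpose(), -1.0);
  }
}

int main() {
  Eigen::MatrixXd M(3, 3);
  M << 4, 2, 2,
       2, 3, 1,
       2, 1, 3;  // symmetric positive definite, eliminate 1 frontal scalar
  partialCholeskySketch(M, 1);
  // Top row now holds [R S] = [2 1 1]; the upper triangle of the bottom-right
  // block holds C - S'S = [2 0; 0 2]. Strictly lower-triangular entries are
  // left untouched, as in the in-place elimination above.
  std::cout << M << std::endl;
  return 0;
}

Because plain Cholesky does no pivoting, there is no column permutation to thread through GaussianConditional, which is why permutation_ and all of its uses disappear in the hunks above.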